Spring Kafka Producer and Consumer: Sending and Receiving Data
Author: 小风010766
This article walks through Spring Kafka producer and consumer setup and the ways data is sent and received. It is offered as a practical reference; if anything is wrong or incomplete, corrections are welcome.
Configuration file
# Kafka broker addresses; for a cluster, list several separated by commas
spring.kafka.bootstrap-servers=
# Kafka SASL authentication -- begin
spring.kafka.properties.sasl.mechanism=SCRAM-SHA-256
spring.kafka.properties.security.protocol=SASL_PLAINTEXT
spring.kafka.producer.properties.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="xxxx" password="xxxxx";
spring.kafka.consumer.properties.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="xxxx" password="xxxx";
# Kafka SASL authentication -- end
# Number of producer retries
spring.kafka.producer.retries=3
# Retry interval (milliseconds)
spring.kafka.producer.retry.backoff.ms=10000
# Producer batch size in bytes (note: bytes, not a message count)
spring.kafka.producer.batch-size=1000
# Total producer buffer memory (335544320 bytes, roughly 320 MB)
spring.kafka.producer.buffer-memory=335544320
# Default consumer group
spring.kafka.consumer.group-id=isms-group
# Start from the earliest unconsumed offset
spring.kafka.consumer.auto-offset-reset=earliest
# Maximum number of records returned by a single poll
spring.kafka.consumer.max-poll-records=500
# Auto-commit interval, in milliseconds
spring.kafka.consumer.auto-commit-interval=1000
# Batch-consumer concurrency; must be less than or equal to the topic's partition count
# (custom key, read via @Value in the KafkaConfiguration class below)
spring.kafka.consumer.batch.concurrency=2
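Before wiring these properties into Spring, it can be worth confirming that the SCRAM credentials are accepted at all. Below is a minimal smoke-test sketch using the plain Kafka client; the broker address broker1:9092 and the topic test-topic are placeholders, not values from this article.

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class SaslSmokeTest {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092"); // placeholder address
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-256");
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
                "org.apache.kafka.common.security.scram.ScramLoginModule required "
                        + "username=\"xxxx\" password=\"xxxx\";");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // get() blocks until the broker acknowledges; a bad credential surfaces as an exception here
            producer.send(new ProducerRecord<>("test-topic", "ping")).get();
            System.out.println("SASL credentials accepted");
        }
    }
}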
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaConfiguration {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.producer.retries}")
    private Integer retries;

    @Value("${spring.kafka.producer.retry.backoff.ms}")
    private Integer retryBackoff;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    @Value("${spring.kafka.consumer.batch.concurrency}")
    private Integer batchConcurrency;

    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private Integer autoCommitInterval;

    @Value("${spring.kafka.properties.security.protocol}")
    private String securityProtocol;

    @Value("${spring.kafka.properties.sasl.mechanism}")
    private String saslMechanism;

    @Value("${spring.kafka.producer.properties.sasl.jaas.config}")
    private String producerSaslJaasConfig;

    @Value("${spring.kafka.consumer.properties.sasl.jaas.config}")
    private String consumerSaslJaasConfig;

    /**
     * Producer configuration
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoff);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 900000);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Idempotence switch: transactions require idempotence, but idempotence can also be used on its own
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol);
        props.put(SaslConfigs.SASL_MECHANISM, saslMechanism);
        props.put(SaslConfigs.SASL_JAAS_CONFIG, producerSaslJaasConfig);
        return props;
    }

    /**
     * Producer factory
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Producer template
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean
    public KafkaAdmin kafkaAdmin() {
        return new KafkaAdmin(kafkaTemplate().getProducerFactory().getConfigurationProperties());
    }

    /**
     * Consumer configuration
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 600000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 300000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Disable auto-commit; offsets are committed manually in the listeners
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, false);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 60000);
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol);
        props.put(SaslConfigs.SASL_MECHANISM, saslMechanism);
        props.put(SaslConfigs.SASL_JAAS_CONFIG, consumerSaslJaasConfig);
        return props;
    }

    /**
     * Batch consumer factory
     */
    /*
    @Bean
    public KafkaListenerContainerFactory<?> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Concurrency; must be less than or equal to the topic's partition count
        factory.setConcurrency(batchConcurrency);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // Enable batch consumption; the batch size is controlled by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        return factory;
    }
    */

    /**
     * Single-record consumer factory
     */
    @Bean
    public KafkaListenerContainerFactory<?> singleConsumerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        factory.setConcurrency(1);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    @Bean
    public KafkaConsumer<String, String> customKafkaConsumer() {
        return new KafkaConsumer<>(consumerConfigs());
    }
}
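For reference, if the commented-out batchFactory bean above were re-enabled, the matching listener would receive an entire poll batch as a list rather than one record at a time. A minimal sketch (the class name and topic name are placeholders):

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class BatchCommandListener {

    private static final Logger log = LoggerFactory.getLogger(BatchCommandListener.class);

    @KafkaListener(topics = {"topic-name"}, containerFactory = "batchFactory")
    public void batchConsumer(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            // Process record.value(); a batch holds at most max-poll-records (500) records
            log.info("offset: {}, partition: {}", record.offset(), record.partition());
        }
        // With AckMode.MANUAL_IMMEDIATE, acknowledge once after the whole batch is processed
        ack.acknowledge();
    }
}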
Producer
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.concurrent.TimeUnit;

// TopicNameEnum, FunctionUtil and FunctionResponse are project-specific helper classes (imports omitted).

@Component
public class SendMessageToKafka {

    private static final Logger log = LoggerFactory.getLogger(SendMessageToKafka.class);

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendToKafka(String message, String topicName) {
        try {
            String topicDesc = TopicNameEnum.getDescByTopicName(topicName);
            SendResult<String, String> sendResult =
                    kafkaTemplate.send(topicName, null, message).get(60, TimeUnit.SECONDS);
            if (sendResult.getRecordMetadata() != null) {
                log.info("Topic [{}]: message sent to Kafka, topic: {}, partition={}, offset={}",
                        topicDesc, sendResult.getProducerRecord().topic(),
                        sendResult.getRecordMetadata().partition(),
                        sendResult.getRecordMetadata().offset());
            } else {
                log.warn("Topic [{}]: sending to Kafka failed, topic: {}, retrying...", topicDesc, topicName);
                // After a failed send, retry up to 3 times
                FunctionResponse<Boolean> response = FunctionUtil.retryExecute(() -> {
                    try {
                        SendResult<String, String> kafkaResult =
                                kafkaTemplate.send(topicName, null, message).get(60, TimeUnit.SECONDS);
                        if (kafkaResult.getRecordMetadata() != null) {
                            return new FunctionResponse<>(true);
                        } else {
                            return new FunctionResponse<>(false);
                        }
                    } catch (Exception e) {
                        return new FunctionResponse<>(false);
                    }
                }, 3, 2000);
                if (!response.getResult()) {
                    log.error("Topic [{}]: message to topic {} still failed after all retries: {}",
                            topicDesc, topicName, message);
                }
            }
        } catch (Exception e) {
            log.error("Topic {}: writing to Kafka failed", topicName, e);
        }
    }
}
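Note that sendToKafka blocks on get(60, TimeUnit.SECONDS), so the calling thread can stall for up to a minute per message. Where that is undesirable, the send can be made non-blocking with a callback. Below is a sketch of such a variant, assuming spring-kafka 2.x, where send() returns a ListenableFuture (in 3.x it returns a CompletableFuture and whenComplete(...) would be used instead):

// Hypothetical non-blocking companion to sendToKafka (spring-kafka 2.x API)
public void sendToKafkaAsync(String message, String topicName) {
    kafkaTemplate.send(topicName, message).addCallback(
            // Success callback: runs on the producer's I/O thread once the broker acknowledges
            result -> log.info("message sent, topic: {}, partition={}, offset={}",
                    result.getRecordMetadata().topic(),
                    result.getRecordMetadata().partition(),
                    result.getRecordMetadata().offset()),
            // Failure callback: invoked after the producer's own retries are exhausted
            ex -> log.error("sending to topic {} failed", topicName, ex));
}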
Consumer
import com.alibaba.fastjson.JSONObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

// DcpMessage is a project-specific message type (import omitted).

@Component
public class CoreCommandListener {

    private static final Logger log = LoggerFactory.getLogger(CoreCommandListener.class);

    @KafkaListener(topics = {"topic-name"}, containerFactory = "singleConsumerFactory")
    public void commandConsumer(ConsumerRecord<Object, String> consumerRecord, Acknowledgment ack) {
        DcpMessage message = JSONObject.parseObject(consumerRecord.value(), DcpMessage.class);
        log.info("offset: {}, partition: {}, consumed command message: {}",
                consumerRecord.offset(), consumerRecord.partition(), message);
        // Manually commit the offset
        ack.acknowledge();
    }
}
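One gap in this listener: if JSONObject.parseObject throws, ack.acknowledge() is never reached and the record is redelivered on the next poll, potentially forever. A container-level error handler bounds the retries. The sketch below assumes spring-kafka 2.8 or later, where setCommonErrorHandler is available; the added line would go inside the singleConsumerFactory() bean in KafkaConfiguration:

import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

// Inside singleConsumerFactory(), after the existing factory setup:
// retry a failing record twice, one second apart, then log and skip it
factory.setCommonErrorHandler(new DefaultErrorHandler(new FixedBackOff(1000L, 2L)));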
Summary
The above is based on personal experience. I hope it provides a useful reference, and I hope readers will continue to support 脚本之家.