准备工作
安装kafka+zookeeper环境
利用命令创建好topic
Pom 文件中引入 spring-kafka jar 包,这里需要注意 2 个地方:
kafka-clients 包版本与服务器端kafka-clients版本保持一致(查看服务器kafka版本方法 在kafka安装目录下libs 中查找kafka-clients开头的jar文件)
引入的 spring-kafka 版本为 2.0 或 2.x 时,需要 Spring 5.0 及以上版本才能支持
..........
org.springframework.kafka
spring-kafka
2.1.8.RELEASE
..........
参考官网 http://kafka.apache.org/documentation/
XML配置方式
生产者
配置:
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd">
class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
如上配置,xml 主要配置了 KafkaTemplate 的构造参数 producerFactory 和 autoFlush,对应 KafkaTemplate 源码中的 2 参构造函数。
producerProperties:设置生产者工厂需要的配置
producerFactory:定义了生产者工厂构造方法
kafkaTemplate:定义了使用producerFactory和是否自动刷新,2个参数来构造kafka生产者模板类
发送消息:
// Send a record asynchronously; the returned ListenableFuture lets us attach
// success / failure callbacks instead of blocking on the result.
// NOTE: KafkaTemplate.send(topic, partition, key, data) takes an Integer
// partition — the original snippet passed the String "partition", which does
// not match any send(...) overload. Fixed to an explicit partition number.
ListenableFuture<SendResult<String, String>> listenableFuture =
        kafkaTemplate.send("topic", 0, "key", "data");

// Callback invoked when the broker acknowledges the send.
SuccessCallback<SendResult<String, String>> successCallback =
        new SuccessCallback<SendResult<String, String>>() {
            @Override
            public void onSuccess(SendResult<String, String> result) {
                // success business logic
            }
        };

// Callback invoked when the send fails (e.g. broker unreachable, timeout).
FailureCallback failureCallback = new FailureCallback() {
    @Override
    public void onFailure(Throwable ex) {
        // failure business logic
    }
};

listenableFuture.addCallback(successCallback, failureCallback);
消费者
配置:
value="org.apache.kafka.common.serialization.StringDeserializer" />
value="org.apache.kafka.common.serialization.StringDeserializer" />
class="org.springframework.kafka.core.DefaultKafkaConsumerFactory" >
${kafka.consumer.topic.credit.for.lease}
${loan.application.feedback.topic}
consumerProperties → consumerFactory:载入配置,构造消费者工厂
messageListener → containerProperties:载入容器配置(topics)
consumerFactory + containerProperties → messageListenerContainer:用容器配置(topics)+ 消息监听器,构造一个并发消息监听容器,并执行初始化方法 doStart
需要注意:KafkaConsumerSerivceImpl 此类需要实现 MessageListener 接口。
消费消息:
方案1:直接实现MessageListener接口,复写onMessage方法,实现自定义消费业务逻辑。
public class KafkaConsumerSerivceImpl implements MessageListener{@Override public void onMessage(ConsumerRecorddata) { //根据不同主题,消费
if("主题1".equals(data.topic())){ //逻辑1
}else if("主题2".equals(data.topic())){ //逻辑2
}} }
方案2:使用@KafkaListener注解,并设置topic,支持SPEL表达式。这样方便拆分多个不同topic处理不同业务逻辑。(特别是有自己的事务的时候,尤其方便)
import org.springframework.kafka.annotation.KafkaListener;
public classKafkaConsumerSerivceImpl { @KafkaListener(topics = "${templar.aggrement.agreementWithhold.topic}") void templarAgreementNoticewithhold(ConsumerRecorddata){ //消费业务逻辑
}}
Java注解方式
生产者
配置:
/**
* @description kafka 生产者配置*/
@Configuration@EnableKafka public classKafkaProducerConfig { publicKafkaProducerConfig(){ System.out.println("kafka生产者配置");}@Bean public ProducerFactoryproducerFactory() { return newDefaultKafkaProducerFactory(producerProperties());}
@Bean public MapproducerProperties() { Map props = new HashMap(); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, PropertiesUtil.getInstance().getString("kafka.producer.bootstrap.servers")); props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, PropertiesUtil.getInstance().getString("kafka.producer.key.serializer")); props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,PropertiesUtil.getInstance().getString("kafka.producer.value.serializer")); props.put(ProducerConfig.RETRIES_CONFIG,PropertiesUtil.getInstance().getInt("kafka.producer.retries")); props.put(ProducerConfig.BATCH_SIZE_CONFIG,PropertiesUtil.getInstance().getInt("kafka.producer.batch.size",1048576)); props.put(ProducerConfig.LINGER_MS_CONFIG,PropertiesUtil.getInstance().getInt("kafka.producer.linger.ms")); props.put(ProducerConfig.BUFFER_MEMORY_CONFIG,PropertiesUtil.getInstance().getLong("kafka.producer.buffer.memory",33554432L)); props.put(ProducerConfig.ACKS_CONFIG,PropertiesUtil.getInstance().getString("kafka.producer.acks","all")); returnprops;}
@Bean public KafkaTemplatekafkaTemplate() { KafkaTemplate kafkaTemplate = new KafkaTemplate(producerFactory(),true); kafkaTemplate.setDefaultTopic(PropertiesUtil.getInstance().getString("kafka.producer.defaultTopic","default")); returnkafkaTemplate;}
}
发送消息:
跟xml配置一样。
消费者
配置:
/**
* @description kafka 消费者配置*/
@Configuration@EnableKafka public classKafkaConsumerConfig { publicKafkaConsumerConfig(){ System.out.println("kafka消费者配置加载...");}@Bean KafkaListenerContainerFactory>
kafkaListenerContainerFactory() { ConcurrentKafkaListenerContainerFactory factory =
newConcurrentKafkaListenerContainerFactory();factory.setConsumerFactory(consumerFactory()); factory.setConcurrency(3); factory.getContainerProperties().setPollTimeout(3000); returnfactory;}
@Bean public ConsumerFactoryconsumerFactory() { return newDefaultKafkaConsumerFactory(consumerProperties());}
@Bean public MapconsumerProperties() { Map props= new HashMap(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.bootstrap.servers")); props.put(ConsumerConfig.GROUP_ID_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.group.id")); props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.enable.auto.commit")); props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.auto.commit.interval.ms")); props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.session.timeout.ms")); props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.key.deserializer")); props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, PropertiesUtil.getInstance().getString("kafka.consumer.value.deserializer")); returnprops;}
@Bean publicKafkaConsumerListener kafkaConsumerListener(){ return newKafkaConsumerListener();}
}
消费消息:
跟xml配置一样。
引用: