How to integrate the Kafka message middleware with Spring Boot
Kafka
1. Installing Kafka
Install ZooKeeper first (Kafka depends on it for coordination).
Switch to /usr/local:
cd /usr/local
Download ZooKeeper (the server needs internet access):
wget http://mirrors.hust.edu.cn/apache/zookeeper/zookeeper-3.4.13/zookeeper-3.4.13.tar.gz
If the server cannot reach the internet, download the archive from the official site and upload it to the server.
Extract it:
tar -zxvf zookeeper-3.4.13.tar.gz (this produces a zookeeper-3.4.13 directory)
After extraction, create ZooKeeper's working directory for logs and data:
cd zookeeper-3.4.13
mkdir data
Edit the configuration file:
cd conf
cp zoo_sample.cfg zoo.cfg
vim zoo.cfg
Change dataDir to /usr/local/zookeeper-3.4.13/data
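After the edit, the dataDir line in zoo.cfg should read:
dataDir=/usr/local/zookeeper-3.4.13/data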
Start it:
Go into ZooKeeper's bin directory:
cd ../bin
./zkServer.sh start
If the output contains STARTED, the startup succeeded.
You can also check the status with ./zkServer.sh status
Download Kafka:
wget http://archive.apache.org/dist/kafka/2.1.0/kafka_2.12-2.1.0.tgz
Extract it:
tar -zxvf kafka_2.12-2.1.0.tgz
Edit the configuration file:
cd kafka_2.12-2.1.0/config/
vim server.properties
Uncomment listeners and replace your.host.name with the host's internal (private) IP.
Uncomment advertised.listeners and replace your.host.name with the host's external (public) IP.
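For example, a sketch of the two lines in server.properties, reusing the internal IP (172.17.54.58) and public IP (47.106.139.245) that appear later in this article (substitute your own addresses):
listeners=PLAINTEXT://172.17.54.58:9092
advertised.listeners=PLAINTEXT://47.106.139.245:9092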
Start it (in the background):
cd /usr/local/kafka_2.12-2.1.0/
nohup bin/kafka-server-start.sh config/server.properties >/dev/null 2>&1 &
Create a topic (named helloydb):
./bin/kafka-topics.sh --create --zookeeper 172.17.54.58:2181 --replication-factor 1 --partitions 1 --topic helloydb
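To verify the topic was created, list the topics registered in ZooKeeper:
./bin/kafka-topics.sh --list --zookeeper 172.17.54.58:2181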
2. Kafka commands
Start Kafka:
Foreground: ./kafka-server-start.sh /usr/local/kafka_2.12-2.1.0/config/server.properties
Background: nohup bin/kafka-server-start.sh config/server.properties >/dev/null 2>&1 &
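Stop: ./kafka-server-stop.sh (the companion stop script shipped in the same bin directory)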
Start a Kafka console consumer:
./kafka-console-consumer.sh --bootstrap-server 172.17.54.58:9092 --topic helloydb --from-beginning
Start a Kafka console producer:
./kafka-console-producer.sh --broker-list 172.17.54.58:9092 --topic helloydb
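With both terminals open, each line typed at the producer's > prompt should appear in the consumer terminal, which confirms the broker round trip:
> hello kafka        (typed in the producer terminal)
hello kafka          (printed by the consumer terminal)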
Integrating Kafka with Spring Boot
1. Configuration
Add the following to application.yml (the properties must sit under the spring: root key for Spring Boot's auto-configuration to pick them up):
```yaml
spring:
  # Kafka configuration
  kafka:
    # broker addresses; several can be listed, comma-separated
    bootstrap-servers: 47.106.139.245:9092
    # producer
    producer:
      retries: 0
      # upper bound, in bytes, of each batched send
      batch-size: 16384
      buffer-memory: 33554432
      # serializers for the message key and value
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    # consumer
    consumer:
      # default consumer group id
      group-id: test-consumer-group
      auto-offset-reset: earliest
      enable-auto-commit: true
      # offset auto-commit interval in ms (100 is an assumed example value)
      auto-commit-interval: 100
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
```
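These spring.kafka.* properties only take effect when spring-kafka is on the classpath. The Maven coordinates are the standard ones (version managed by the Spring Boot parent POM, assuming the project uses it):

```xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
```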
2. The message class KafkaMessage
Write the message class:
```java
package com.minhai.boot.kafka;

import lombok.Data;

import java.util.Date;

/**
 * Message payload sent to Kafka.
 */
@Data
public class KafkaMessage {
    private Long id;
    private String msg;
    private Date sendTime;
}
```
3. The message sender KafkaSender
```java
package com.minhai.boot.kafka;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

import java.util.Date;

/**
 * Sends KafkaMessage objects to the helloydb topic as JSON.
 *
 * @since v2.8.7
 */
@Component
public class KafkaSender {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSender.class);

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    private final Gson gson = new GsonBuilder().create();

    /**
     * Sends 100 test messages to the helloydb topic.
     */
    public void send() {
        long startTime = System.currentTimeMillis();
        LOGGER.info("start send message to kafka, startTime = [{}]", startTime);
        for (int i = 0; i < 100; i++) {
            KafkaMessage kafkaMessage = new KafkaMessage();
            kafkaMessage.setId(System.currentTimeMillis());
            kafkaMessage.setMsg("test kafka message --" + i);
            kafkaMessage.setSendTime(new Date());
            kafkaTemplate.send("helloydb", gson.toJson(kafkaMessage));
            LOGGER.info("++++++++++++++++ message = {}", gson.toJson(kafkaMessage));
        }
        LOGGER.info("end send message to kafka, endTime = [{}], time consumed = [{}] ms",
                System.currentTimeMillis(), System.currentTimeMillis() - startTime);
    }
}
```
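kafkaTemplate.send() is asynchronous: it returns a future rather than blocking until the broker acknowledges. A minimal sketch of a callback-logging variant that could be added to KafkaSender (assumes spring-kafka 2.x, where send() returns ListenableFuture&lt;SendResult&lt;K, V&gt;&gt;; the method name sendWithCallback is ours):

```java
// Extra imports needed in KafkaSender for this sketch:
//   import org.springframework.kafka.support.SendResult;
//   import org.springframework.util.concurrent.ListenableFuture;

public void sendWithCallback(String payload) {
    ListenableFuture<SendResult<String, String>> future =
            kafkaTemplate.send("helloydb", payload);
    future.addCallback(
            // success: the broker assigned the record a partition and offset
            result -> LOGGER.info("sent ok, offset = [{}]",
                    result.getRecordMetadata().offset()),
            // failure: e.g. broker unreachable or request timeout
            ex -> LOGGER.error("send failed", ex));
}
```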
4. The message receiver KafkaReceiver
```java
package com.minhai.boot.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.util.Optional;

/**
 * Listens on the helloydb topic and logs each received message.
 *
 * @since v2.8.7
 */
@Component
public class KafkaReceiver {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaReceiver.class);

    @KafkaListener(topics = "helloydb", groupId = "test-consumer-group")
    public void listen(ConsumerRecord<?, ?> record) {
        // record.value() can be null (e.g. a tombstone record), so guard with Optional
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        if (kafkaMessage.isPresent()) {
            Object message = kafkaMessage.get();
            LOGGER.info("---------------------- message = {}", message);
        }
    }
}
```
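Since KafkaSender serializes each KafkaMessage to JSON with Gson, the listener can parse the payload back into an object instead of logging the raw string. A sketch of an alternative listener (replace the listen() method above rather than adding a second listener in the same consumer group, since with a single partition only one of them would receive messages):

```java
// Extra import needed in KafkaReceiver for this sketch:
//   import com.google.gson.Gson;

private final Gson gson = new Gson();

@KafkaListener(topics = "helloydb", groupId = "test-consumer-group")
public void listen(ConsumerRecord<?, String> record) {
    if (record.value() == null) {
        return;
    }
    // parse the JSON produced by KafkaSender back into a KafkaMessage
    KafkaMessage message = gson.fromJson(record.value(), KafkaMessage.class);
    LOGGER.info("received id = [{}], msg = [{}], sendTime = [{}]",
            message.getId(), message.getMsg(), message.getSendTime());
}
```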
5. Sending messages
```java
package com.minhai.boot.kafka;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * Test endpoint that triggers KafkaSender.
 *
 * @since v2.8.7
 */
@RestController
@RequestMapping("kafka")
public class KafkaTestController {

    @Autowired
    private KafkaSender kafkaSender;

    // Handler methods must be public; @ResponseBody is already implied by @RestController.
    @RequestMapping("test")
    public String testKafka() {
        kafkaSender.send();
        return "success";
    }
}
```
Once the project is running, call the /kafka/test endpoint; the console consumer started earlier will print the 100 messages the program just sent.
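For example, assuming the application runs locally on the default port 8080:
curl http://localhost:8080/kafka/test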