Kafka Cluster Installation and Deployment
1 Install the JDK required by Kafka
[root@kafka-1 ~]# rpm -ivh jdk-8u191-linux-x64.rpm
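To confirm the JDK is on the PATH on each node, a quick check (not part of the original steps; jdk-8u191 should report version 1.8.0_191):
[root@kafka-1 ~]# java -version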
2 Deploy ZooKeeper
Create the ZooKeeper data and logs directories
[root@kafka-1 ~]# mkdir -p /opt/elk/kafka/zookeeper/{data,logs}
Create the myid file
[root@kafka-1 ~]# echo 1 >/opt/elk/kafka/zookeeper/data/myid
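The value in myid must match this host's server.N entry in the ZooKeeper configuration below (server.1 = 192.168.101.28 for kafka-1). A quick check:
[root@kafka-1 ~]# cat /opt/elk/kafka/zookeeper/data/myid
1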
3 Edit the ZooKeeper configuration file
[root@kafka-1 config]# cat zookeeper1.properties | grep -v "#" | grep -v "^$"
dataDir=/opt/elk/kafka/zookeeper/data
dataLogDir=/opt/elk/kafka/zookeeper/logs
clientPort=2181
maxClientCnxns=20
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.101.28:2888:3888
server.2=192.168.101.24:2888:3888
server.3=192.168.101.25:2888:3888
4 Edit the Kafka configuration file
[root@kafka-1 config]# cat server1.properties | grep -v "#" | grep -v "^$"
broker.id=1
listeners=PLAINTEXT://192.168.101.28:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.101.28:2181,192.168.101.25:2181,192.168.101.24:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
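Note that log.dirs points to /tmp/kafka-logs, which is typically cleaned on reboot, so the message data will not survive a restart. If persistent storage is wanted, a minimal sketch (the path /opt/elk/kafka/kafka-logs is an assumption, not part of the original setup):
[root@kafka-1 ~]# mkdir -p /opt/elk/kafka/kafka-logs
Then set log.dirs=/opt/elk/kafka/kafka-logs in server.properties on every broker before starting Kafka.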
5 Modify the configuration files on the other cluster nodes
Copy zookeeper.properties and server.properties to the other nodes in the cluster, then adjust broker.id (and listeners) in server.properties and the value in the myid file on each node.
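For example, the files can be pushed from kafka-1 with scp (a sketch assuming the same install path /root/kafka_2.11-1.1.0 on every node):
[root@kafka-1 config]# scp zookeeper.properties server.properties root@192.168.101.24:/root/kafka_2.11-1.1.0/config/
[root@kafka-1 config]# scp zookeeper.properties server.properties root@192.168.101.25:/root/kafka_2.11-1.1.0/config/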
kafka-2 node configuration files
[root@kafka-2 config]# cat /opt/elk/kafka/zookeeper/data/myid
2
[root@kafka-2 config]# cat zookeeper2.properties | grep -v "#" | grep -v "^$"
dataDir=/opt/elk/kafka/zookeeper/data
dataLogDir=/opt/elk/kafka/zookeeper/logs
clientPort=2181
maxClientCnxns=20
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.101.28:2888:3888
server.2=192.168.101.24:2888:3888
server.3=192.168.101.25:2888:3888
[root@kafka-2 config]# cat server2.properties | grep -v "#" | grep -v "^$"
broker.id=2
listeners=PLAINTEXT://192.168.101.24:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.101.28:2181,192.168.101.25:2181,192.168.101.24:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
kafka-3 node configuration files
[root@kafka-3 config]# cat zookeeper3.properties | grep -v "#" | grep -v "^$"
dataDir=/opt/elk/kafka/zookeeper/data
dataLogDir=/opt/elk/kafka/zookeeper/logs
clientPort=2181
maxClientCnxns=20
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.101.28:2888:3888
server.2=192.168.101.24:2888:3888
server.3=192.168.101.25:2888:3888
[root@kafka-3 config]# cat server3.properties | grep -v "#" | grep -v "^$"
broker.id=3
listeners=PLAINTEXT://192.168.101.25:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.101.28:2181,192.168.101.25:2181,192.168.101.24:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@kafka-3 config]# cat /opt/elk/kafka/zookeeper/data/myid
3
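Before starting anything, it is worth confirming that every node ended up with a unique identity. A quick check on any node (assuming the file is server.properties under /root/kafka_2.11-1.1.0/config, as in the start commands below):
[root@kafka-3 config]# grep -E "^(broker.id|listeners)" server.properties
broker.id and listeners must differ on every broker, and the myid value must match the server.N line for that host.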
6 Start the ZooKeeper cluster
[root@kafka-1 config]# nohup /root/kafka_2.11-1.1.0/bin/zookeeper-server-start.sh /root/kafka_2.11-1.1.0/config/zookeeper.properties >>/dev/null 2>&1 &
[root@kafka-2 config]# nohup /root/kafka_2.11-1.1.0/bin/zookeeper-server-start.sh /root/kafka_2.11-1.1.0/config/zookeeper.properties >>/dev/null 2>&1 &
[root@kafka-3 config]# nohup /root/kafka_2.11-1.1.0/bin/zookeeper-server-start.sh /root/kafka_2.11-1.1.0/config/zookeeper.properties >>/dev/null 2>&1 &
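Once started, the ZooKeeper ports should be open: 2181 for clients, 3888 for leader election, and 2888 on whichever node is currently the leader. A quick check on any node (ss is from the iproute package; netstat -lntp works as well):
[root@kafka-1 config]# ss -lntp | grep -E "2181|2888|3888"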
Verify that ZooKeeper started correctly
Install the nc package
yum install nc -y
The overall state of the cluster can be inspected with the following four-letter-word commands (a sketch that checks all three nodes at once follows this list):
echo stat | nc 127.0.0.1 2181    check whether this node was elected follower or leader
echo ruok | nc 127.0.0.1 2181    test whether the server is up; a reply of imok means it is running
echo dump | nc 127.0.0.1 2181    list outstanding sessions and ephemeral nodes
echo kill | nc 127.0.0.1 2181    shut the server down
echo conf | nc 127.0.0.1 2181    print details about the server's configuration
echo cons | nc 127.0.0.1 2181    list full connection/session details for all clients connected to the server
echo envi | nc 127.0.0.1 2181    print details about the server environment (as opposed to conf)
echo reqs | nc 127.0.0.1 2181    list outstanding requests
echo wchs | nc 127.0.0.1 2181    list brief information about the server's watches
echo wchc | nc 127.0.0.1 2181    list detailed watch information by session; the output is a list of sessions with their watches
echo wchp | nc 127.0.0.1 2181    list detailed watch information by path; the output is a list of paths with their associated sessions
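For example, to check all three nodes at once from any host that has nc installed (IP list taken from the cluster configuration above), each node should answer imok:
for ip in 192.168.101.28 192.168.101.24 192.168.101.25; do echo -n "$ip "; echo ruok | nc $ip 2181; echo; done
Running echo stat | nc against each IP in the same way shows which node was elected leader.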
7 Start the Kafka cluster
[root@kafka-1 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
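The same command also needs to be run on the other two brokers (shown here for completeness, using the same directory layout):
[root@kafka-2 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
[root@kafka-3 bin]# ./kafka-server-start.sh -daemon ../config/server.properties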
Run jps to check whether the Kafka service has started
[root@kafka-1 config]# jps
5472 QuorumPeerMain
32643 Jps
16010 Kafka
Run the following command to verify that a topic can be created
[root@kafka-1 config]# /root/kafka_2.11-1.1.0/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
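To confirm the topic is visible cluster-wide, it can be listed and described with the same kafka-topics.sh tool:
[root@kafka-1 config]# /root/kafka_2.11-1.1.0/bin/kafka-topics.sh --list --zookeeper localhost:2181
[root@kafka-1 config]# /root/kafka_2.11-1.1.0/bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test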
Author: 小白叨叨
Source: https://blog.csdn.net/wzf862187413/article/details/87805829