Kafka
0. Operating system parameter tuning
Edit /etc/sysctl.conf:
fs.file-max = 5000000
fs.nr_open = 2000000
vm.swappiness = 5
Apply the changes:
#sysctl -p
Edit /etc/security/limits.conf (the leading * applies the limits to all users):
* soft nofile 1500000
* hard nofile 1500000
* soft nproc 1500000
* hard nproc 1500000
Verify the new limits from a fresh login session:
#ulimit -n
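A quick sanity check of the kernel values and of both the hard and soft per-process limits (note that limits.conf changes only take effect for new login sessions):
#sysctl fs.file-max fs.nr_open vm.swappiness
#ulimit -Hn
#ulimit -Sn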
1. ZooKeeper installation
- Install the JDK
#wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" https://download.oracle.com/otn-pub/java/jdk/13.0.1+9/cec27d702aa74d5a8630c65ae61e4305/jdk-13.0.1_linux-x64_bin.tar.gz
#tar zxvf jdk-13.0.1_linux-x64_bin.tar.gz -C /usr/local
#ln -s /usr/local/jdk-13.0.1 /usr/local/jdk
Configure JAVA_HOME:
#cat /etc/profile.d/java.sh
JAVA_HOME=/usr/local/jdk
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
PATH=$JAVA_HOME/bin:$PATH
export JAVA_HOME PATH
Reload the environment variables:
#source /etc/profile
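After reloading the profile, confirm the JDK is picked up from /usr/local/jdk:
#java -version
#echo $JAVA_HOME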
- Install ZooKeeper
#wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
#tar zxvf zookeeper-3.4.14.tar.gz -C /usr/local
#ln -s /usr/local/zookeeper-3.4.14 /usr/local/zookeeper
- Write zoo.cfg as follows
#cat >/usr/local/zookeeper/conf/zoo.cfg <<EOF
tickTime=2000
initLimit=20
syncLimit=5
dataDir=/data/zookeeper/zkdata
dataLogDir=/data/zookeeper/zklogs
clientPort=2181
server.101=192.168.83.31:2888:3888
server.102=192.168.83.32:2888:3888
server.103=192.168.83.33:2888:3888
EOF
Create the ZooKeeper dataDir and dataLogDir:
#mkdir -p /data/zookeeper/zkdata
#mkdir -p /data/zookeeper/zklogs
#echo 101 > /data/zookeeper/zkdata/myid
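The myid value must match this host's server.X entry in zoo.cfg, so on the other two nodes write the corresponding ids:
#echo 102 > /data/zookeeper/zkdata/myid   # on 192.168.83.32
#echo 103 > /data/zookeeper/zkdata/myid   # on 192.168.83.33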
- Start/stop ZooKeeper
Start:
#/usr/local/zookeeper/bin/zkServer.sh start
Stop:
#/usr/local/zookeeper/bin/zkServer.sh stop
- Verify ZooKeeper
#netstat -nltp | grep -iE "2181|2888|3888"
#Whichever node is the leader will also be listening on port 2888.
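zkServer.sh can also report each node's role directly; it prints "Mode: leader" on the leader and "Mode: follower" on the others:
#/usr/local/zookeeper/bin/zkServer.sh status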
2. Install Kafka
#The closer.cgi link (https://www.apache.org/dyn/closer.cgi?path=/kafka/2.3.1/kafka_2.12-2.3.1.tgz) only returns a mirror-picker page; download from a mirror directly:
#wget https://www-eu.apache.org/dist/kafka/2.3.1/kafka_2.12-2.3.1.tgz
#tar zxvf kafka_2.12-2.3.1.tgz -C /usr/local
#ln -s kafka_2.12-2.3.1 /usr/local/kafka
Kafka configuration parameters (/usr/local/kafka/config/server.properties):
log.dirs=/data/kafka/logs
zookeeper.connect=192.168.83.31:2181,192.168.83.32:2181,192.168.83.33:2181
advertised.listeners=PLAINTEXT://zy-tw-yabo-prod-newgamerecord01:9092
auto.create.topics.enable=false
unclean.leader.election.enable=false
auto.leader.rebalance.enable=false
log.retention.hours=168
message.max.bytes=10000120
JVM settings: edit /usr/local/kafka/bin/kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx30G -Xms30G"
fi
export KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true"
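Note that the server.properties sample above does not pin broker.id (Kafka auto-generates one by default), and advertised.listeners should point at each broker's own hostname. In a multi-node cluster it is common to set an explicit, unique id per broker; a hedged sketch with assumed numbering:
broker.id=1   # e.g. 1 on 192.168.83.31, 2 on .32, 3 on .33 (assumed numbering)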
3. Starting and stopping Kafka
Start Kafka:
#/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
If it starts normally, the broker startup messages appear in logs/server.log under the Kafka directory.
Stop Kafka:
#/usr/local/kafka/bin/kafka-server-stop.sh
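Because auto.create.topics.enable=false, topics have to be created explicitly (including the proxy-nginx topic used by Filebeat below). A quick smoke test; the topic name smoke-test is just a placeholder:
#/usr/local/kafka/bin/kafka-topics.sh --create --bootstrap-server 192.168.83.31:9092 --replication-factor 3 --partitions 3 --topic smoke-test
#/usr/local/kafka/bin/kafka-topics.sh --list --bootstrap-server 192.168.83.31:9092
#/usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.83.31:9092 --topic smoke-test
#/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.83.31:9092 --topic smoke-test --from-beginning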
Add ZooKeeper as a systemd service
vim /usr/local/zookeeper/bin/zkServer.sh
Add near the top of the script:
export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$PATH
vim /etc/systemd/system/zookeeper.service
[Unit]
Description=zookeeper.service
After=network.target

[Service]
Type=forking
User=root
Group=root
ExecStart=/usr/local/zookeeper/bin/zkServer.sh start /usr/local/zookeeper/conf/zoo.cfg
ExecStop=/usr/local/zookeeper/bin/zkServer.sh stop /usr/local/zookeeper/conf/zoo.cfg

[Install]
WantedBy=multi-user.target
systemctl daemon-reload
systemctl start zookeeper
systemctl status zookeeper
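To also start ZooKeeper automatically at boot:
systemctl enable zookeeper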
Add Kafka as a systemd service
vim /usr/local/kafka/bin/kafka-server-start.sh
Add near the top of the script:
export JAVA_HOME=/usr/local/jdk
export PATH=$JAVA_HOME/bin:$PATH
vim /etc/systemd/system/kafka.service
[Unit]
Description=kafka.service
After=network.target remote-fs.target zookeeper.service

[Service]
Type=forking
User=root
Group=root
ExecStart=/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
ExecStop=/usr/local/kafka/bin/kafka-server-stop.sh

[Install]
WantedBy=multi-user.target
systemctl daemon-reload
systemctl start kafka
systemctl status kafka
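Likewise, to enable Kafka at boot:
systemctl enable kafka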
Filebeat
References (these two articles walk through the installation clearly):
https://www.jianshu.com/p/97a4615adcc3
https://www.cnblogs.com/xishuai/p/elk-elasticsearch-kibana-logstash-filebeat-log4j2.html
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.5.1-linux-x86_64.tar.gz
Run in the foreground for testing:
/usr/local/filebeat/filebeat -e -c /usr/local/filebeat/filebeat.yml -path.home /usr/local/filebeat/filebeat -path.config /usr/local/filebeat/ -path.data /data/filebeat -path.logs /data/logs/filebeat
[root@aliyun-hk-yabo-test-ownself ~]# cat /usr/local/filebeat/filebeat.yml
name: "192.168.83.40"
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
  multiline.pattern: '^\x1b\['
  multiline.negate: true
  multiline.match: after
  fields_under_root: true
  fields:
    application: proxy-nginx
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
output.kafka:
  hosts: ["192.168.83.31:9092","192.168.83.32:9092","192.168.83.33:9092"]
  topic: '%{[application]}'
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 10000000
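Before starting, the configuration syntax can be validated with Filebeat's built-in check (same -c path as the start command below):
/usr/local/filebeat/filebeat test config -c /usr/local/filebeat/filebeat.yml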
Start:
nohup /usr/local/filebeat/filebeat -e -c /usr/local/filebeat/filebeat.yml -path.home /usr/local/filebeat/filebeat -path.config /usr/local/filebeat/ -path.data /data/filebeat -path.logs /data/logs/filebeat &
Elasticsearch
wget https://download.java.net/java/GA/jdk11/13/GPL/openjdk-11.0.1_linux-x64_bin.tar.gz
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.5.1-linux-x86_64.tar.gz
Key elasticsearch.yml settings:
path.data: /data/es
path.logs: /data/es
thread_pool.write.queue_size: 3000
thread_pool.search.queue_size: 3000
network.host: 192.168.83.31
discovery.type: single-node
http.port: 9200
Start:
./elasticsearch -d
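Elasticsearch 7.x refuses to start as the root user, so launch it from a dedicated non-root account. Once it is up, verify it responds on the configured host and port:
#curl http://192.168.83.31:9200
#curl http://192.168.83.31:9200/_cluster/health?pretty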
Logstash
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.5.1.tar.gz
logstash.yml settings:
path.data: /var/lib/logstash
path.logs: /var/log/logstash
[root@server1 config]# cat logstash01.conf
input {
  kafka {
    bootstrap_servers => "192.168.83.31:9092"
    client_id => "logstash01"
    consumer_threads => 10
    topics => [ "proxy-nginx" ]
    codec => "json"   # Filebeat writes JSON events to Kafka; decode them so fields like [application] are available
  }
}
#filter {
#}
output {
  elasticsearch {
    index => "%{[application]}_%{+YYYY.MM.dd}"   # matches the "application" field set in filebeat.yml
    hosts => ["192.168.83.31:9200"]
  }
  stdout {
    codec => rubydebug
  }
}
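To syntax-check the pipeline and then run it (the /usr/local/logstash install path is an assumption; adjust it to wherever the tarball was unpacked):
#/usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash01.conf --config.test_and_exit
#nohup /usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash01.conf &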
Kibana
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.5.1-linux-x86_64.tar.gz
cat kibana.yml
server.port: 5501
server.host: "192.168.83.31"
elasticsearch.hosts: ["http://192.168.83.31:9200"]
Start:
nohup ./kibana &
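Once Kibana is up, a quick check against the host and port set in kibana.yml above:
#curl -I http://192.168.83.31:5501/api/status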
Source: oschina
Link: https://my.oschina.net/u/3635512/blog/3155120