使用Docker快速搭建ELK日志系统集群
ELK 是 ElasticSearch、Logstash、Kibana 的简称,一般用于日志系统,从日志收集,日志转储,日志展示等入手,用以提供简洁高效的日志处理机制。这篇文章记录一下我用 Docker 搭建 ELK,并且结合 filebeat 实现自动化地把所有 Docker 容器的日志数据传输给 ELK 的过程。
ElasticSearch
1.master节点
创建es-master.yml配置文件
# Cluster name — must be identical on every node that joins this cluster
cluster.name: es-cluster
# Unique node name within the cluster
node.name: es-master
# This node is eligible to be elected master
node.master: true
# Dedicated master: does not store index data (the slave nodes hold the data)
node.data: false
# Lock process memory so it is never swapped out; should be true in
# production (requires the memlock ulimits in docker-compose) — avoids an
# extra copy by keeping direct memory-address mapping
#bootstrap.memory_lock: true
# System-call filter check (guards against data corruption); set to false in
# production only on kernels without seccomp support
#bootstrap.system_call_filter: false
# Bind to all interfaces so the container port mapping is reachable
network.host: 0.0.0.0
# HTTP REST API port
http.port: 9200
# Inter-node transport (TCP) port
transport.port: 9300
# Seed hosts for cluster discovery (resolved via the Docker network aliases)
discovery.seed_hosts:
- es-master
- es-slave1
- es-slave2
# Names/IPs of the master-eligible nodes used only for the very first
# cluster-bootstrap election
cluster.initial_master_nodes:
- es-master
# Enable CORS so browser tools (e.g. es-head) can reach the REST API
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
# X-Pack security: the trial license enables authentication and monitoring
# NOTE(review): trial expires after 30 days — switch to the basic-ssl setup
# below for long-term use
xpack.license.self_generated.type: trial
xpack.security.enabled: true
xpack.monitoring.collection.enabled: true
################################ basic ssl #########################################
# xpack.security.enabled: true
# xpack.license.self_generated.type: basic
# xpack.security.transport.ssl.enabled: true
# xpack.security.transport.ssl.verification_mode: certificate
# xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
# xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
####################################################################################
2.slave1节点
创建es-slave1.yml配置文件
# Cluster name — must match the master's cluster.name to join the cluster
cluster.name: es-cluster
# Unique node name within the cluster
node.name: es-slave1
# This node is also master-eligible
node.master: true
# Data node: stores index shards (default is true; kept explicit)
node.data: true
# Lock process memory so it is never swapped out (enable in production
# together with the memlock ulimits in docker-compose)
#bootstrap.memory_lock: true
# Bind to all interfaces so the container port mapping is reachable
network.host: 0.0.0.0
# HTTP REST API port inside the container
# NOTE(review): verify this matches the container-port side of the
# docker-compose port mapping for es-slave1
http.port: 9201
# Inter-node transport port — left at the default 9300
#transport.port: 9301
# Seed hosts for cluster discovery (resolved via the Docker network aliases)
discovery.seed_hosts:
- es-master
- es-slave1
- es-slave2
# Names/IPs of the master-eligible nodes used only for the very first
# cluster-bootstrap election
cluster.initial_master_nodes:
- es-master
# Enable CORS so browser tools (e.g. es-head) can reach the REST API
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
# X-Pack security: the trial license enables authentication and monitoring
xpack.license.self_generated.type: trial
xpack.security.enabled: true
xpack.monitoring.collection.enabled: true
################################ basic ssl #########################################
# xpack.security.enabled: true
# xpack.license.self_generated.type: basic
# xpack.security.transport.ssl.enabled: true
# xpack.security.transport.ssl.verification_mode: certificate
# xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
# xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
####################################################################################
3.slave2节点
创建es-slave2.yml配置文件
# Cluster name — must match the master's cluster.name to join the cluster
cluster.name: es-cluster
# Unique node name within the cluster
node.name: es-slave2
# This node is also master-eligible
node.master: true
# Data node: stores index shards (default is true; kept explicit)
node.data: true
# Lock process memory so it is never swapped out (enable in production
# together with the memlock ulimits in docker-compose)
#bootstrap.memory_lock: true
# Bind to all interfaces so the container port mapping is reachable
network.host: 0.0.0.0
# HTTP REST API port inside the container
# NOTE(review): verify this matches the container-port side of the
# docker-compose port mapping for es-slave2
http.port: 9202
# Inter-node transport port — left at the default 9300
#transport.port: 9302
# Seed hosts for cluster discovery (resolved via the Docker network aliases)
discovery.seed_hosts:
- es-master
- es-slave1
- es-slave2
# Names/IPs of the master-eligible nodes used only for the very first
# cluster-bootstrap election
cluster.initial_master_nodes:
- es-master
# Enable CORS so browser tools (e.g. es-head) can reach the REST API
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
# X-Pack security: the trial license enables authentication and monitoring
xpack.license.self_generated.type: trial
xpack.security.enabled: true
xpack.monitoring.collection.enabled: true
################################ basic ssl #########################################
# xpack.security.enabled: true
# xpack.license.self_generated.type: basic
# xpack.security.transport.ssl.enabled: true
# xpack.security.transport.ssl.verification_mode: certificate
# xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
# xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
####################################################################################
Logstash
创建logstash.yml配置文件
# Bind the Logstash monitoring/HTTP API to all interfaces
http.host: "0.0.0.0"
# Ship Logstash's own monitoring metrics to the Elasticsearch cluster
xpack.monitoring.elasticsearch.hosts: ["http://es-master:9200"]
xpack.monitoring.enabled: true
# Credentials for the monitoring connection
# NOTE(review): plaintext credentials — prefer env vars or the keystore
xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "123456"
Kibana
创建kibana.yml配置文件
# Port Kibana listens on inside the container
server.port: 5601
# Bind to all interfaces so the mapped host port is reachable
server.host: "0.0.0.0"
# Elasticsearch endpoint (Docker network alias of the master node)
elasticsearch.hosts: ["http://es-master:9200"]
# UI language: Simplified Chinese
i18n.locale: "zh-CN"
# Monitoring UI resolves the container-hosted Elasticsearch
xpack.monitoring.ui.container.elasticsearch.enabled: true
# X-Pack credentials Kibana uses to talk to Elasticsearch
# NOTE(review): plaintext credentials — prefer the Kibana keystore
elasticsearch.username: "elastic"
elasticsearch.password: "123456"
Filebeat
创建filebeat.yml配置文件
# Filebeat input: tail plain-text and JSON log files mounted into the container.
# (Indentation restored — the flat layout was invalid YAML: the per-input
# options must nest under the "- type: log" sequence item.)
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      # Log file path globs; not recursive
      - /var/log/*.log
      - /var/log/*.json
      # - /var/log/php/php-fpm.log
    # Multiline handling: lines NOT starting with '[' are appended to the
    # previous event (typical stack-trace folding). Pattern quoted so the
    # leading backslash can never be re-interpreted by YAML tooling.
    multiline.pattern: '^\['
    multiline.negate: true
    multiline.match: after

# Load the module configs shipped with Filebeat
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1
setup.dashboards.enabled: false

setup.kibana:
  host: "http://kibana:5601"

# Alternative: ship directly to Elasticsearch, bypassing Logstash (disabled)
#output.elasticsearch:
#  hosts: ["http://es-master:9200"]
#  index: "filebeat-%{[beat.version]}-%{+yyyy.MM.dd}"
#  username: 'elastic'
#  password: '123456'

# Ship events to Logstash for parsing and filtering
output.logstash:
  hosts: ["logstash:5044"]

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
最后创建logstash-filebeat.conf
# Logstash pipeline: Beats in -> grok/geoip filters -> Elasticsearch out.
input {
# Receive events from Beats shippers (Filebeat)
beats {
# Listening port (must match output.logstash in filebeat.yml)
port => "5044"
}
}
#input {
# Alternative: read log files directly instead of via Beats
# file {
# path => ["/var/log/logstash/nginx.log"]
# start_position => "beginning"
# NOTE(review): "nul" is the Windows null device; use "/dev/null" on Linux
# sincedb_path => "nul"
# type => "nginx"
# codec => "json"
# }
#}
# Filter plugins — several may be chained
filter {
# Parse Apache/Nginx combined access-log lines into structured fields
grok {
match => { "message" => "%{COMBINEDAPACHELOG}"}
}
# Enrich events with GeoIP data resolved from the parsed client IP
geoip {
source => "clientip"
}
}
output {
# Index events into Elasticsearch, one index per beat/version/day
elasticsearch {
hosts => ["http://es-master:9200"]
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
user => "elastic"
password => "123456"
}
}
Docker-compose
创建docker-compose.yml文件
# Docker Compose stack: 3-node Elasticsearch cluster + Kibana + Logstash +
# Filebeat + es-head + cerebro, all on one bridge network.
# (Indentation restored — the flat layout was invalid YAML.)
version: "3.2"
services:
  # Elasticsearch master (dedicated master, node.data: false)
  es-master:
    container_name: es-master
    hostname: es-master
    image: elasticsearch:7.4.2
    restart: always
    ports:
      # Mappings quoted so YAML never mis-types host:container pairs
      - "9200:9200"
      - "9300:9300"
    volumes:
      - ./elasticsearch/master/conf/es-master.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./elasticsearch/master/data:/usr/share/elasticsearch/data
      - ./elasticsearch/master/logs:/usr/share/elasticsearch/logs
      - ./escerts:/usr/share/elasticsearch/config/certs
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # Fixed typo: the IANA zone is Asia/Shanghai (was Asia/ShangHai)
      # NOTE(review): most images honour TZ rather than TIMEZONE — confirm
      - TIMEZONE=Asia/Shanghai
      # Bootstrap password for the built-in "elastic" user
      - ELASTIC_PASSWORD=123456
    # Required when bootstrap.memory_lock is enabled in elasticsearch.yml
    #ulimits:
    #  memlock:
    #    soft: -1
    #    hard: -1
    #  nofile:
    #    soft: 65536
    #    hard: 65536
    networks:
      - elknet

  # Elasticsearch data node 1
  es-slave1:
    container_name: es-slave1
    image: elasticsearch:7.4.2
    restart: always
    ports:
      # es-slave1.yml sets http.port: 9201 INSIDE the container, so the
      # HTTP mapping must target 9201 (the original 9201:9200 pointed at a
      # port the node does not listen on). Transport stays at default 9300.
      - "9201:9201"
      - "9301:9300"
    volumes:
      - ./elasticsearch/slave1/conf/es-slave1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./elasticsearch/slave1/data:/usr/share/elasticsearch/data
      - ./elasticsearch/slave1/logs:/usr/share/elasticsearch/logs
      - ./escerts:/usr/share/elasticsearch/config/certs
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - TIMEZONE=Asia/Shanghai
    #ulimits:
    #  memlock:
    #    soft: -1
    #    hard: -1
    #  nofile:
    #    soft: 65536
    #    hard: 65536
    networks:
      - elknet

  # Elasticsearch data node 2
  es-slave2:
    container_name: es-slave2
    image: elasticsearch:7.4.2
    restart: always
    ports:
      # es-slave2.yml sets http.port: 9202 inside the container (see note
      # on es-slave1 above)
      - "9202:9202"
      - "9302:9300"
    volumes:
      - ./elasticsearch/slave2/conf/es-slave2.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./elasticsearch/slave2/data:/usr/share/elasticsearch/data
      - ./elasticsearch/slave2/logs:/usr/share/elasticsearch/logs
      - ./escerts:/usr/share/elasticsearch/config/certs
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - TIMEZONE=Asia/Shanghai
    #ulimits:
    #  memlock:
    #    soft: -1
    #    hard: -1
    #  nofile:
    #    soft: 65536
    #    hard: 65536
    networks:
      - elknet

  # Kibana web UI
  kibana:
    container_name: kibana
    hostname: kibana
    image: kibana:7.4.2
    restart: always
    ports:
      - "5601:5601"
    volumes:
      - ./kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      # NOTE(review): Kibana reads ELASTICSEARCH_HOSTS, not this lowercase
      # form — harmless here since the mounted kibana.yml sets the same value
      - elasticsearch.hosts=http://es-master:9200
      - TIMEZONE=Asia/Shanghai
    depends_on:
      - es-master
      - es-slave1
      - es-slave2
    networks:
      - elknet

  # Logstash pipeline
  logstash:
    container_name: logstash
    hostname: logstash
    image: logstash:7.4.2
    command: logstash -f ./conf/logstash-filebeat.conf
    restart: always
    volumes:
      # Pipeline definition and settings mounted from the host
      - ./logstash/conf/logstash-filebeat.conf:/usr/share/logstash/conf/logstash-filebeat.conf
      - ./logstash/conf/logstash.yml:/usr/share/logstash/config/logstash.yml
    environment:
      - elasticsearch.hosts=http://es-master:9200
      # Works around the Logstash monitoring-connection error
      - xpack.monitoring.elasticsearch.hosts=http://es-master:9200
      - TIMEZONE=Asia/Shanghai
    ports:
      - "5044:5044"
    depends_on:
      - es-master
      - es-slave1
      - es-slave2
    networks:
      - elknet

  # Filebeat log shipper
  filebeat:
    container_name: filebeat
    hostname: filebeat
    image: docker.io/elastic/filebeat:7.4.2
    restart: always
    volumes:
      - ./filebeat/conf/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - ./logs:/var/log
      - ./filebeat/logs:/usr/share/filebeat/logs
      - ./filebeat/data:/usr/share/filebeat/data
    environment:
      - TIMEZONE=Asia/Shanghai
    # Name-based link so "logstash" keeps resolving even when the container
    # restarts and its IP changes
    links:
      - logstash
    # Start ordering only (optional)
    depends_on:
      - es-master
      - es-slave1
      - es-slave2
    networks:
      - elknet

  # es-head cluster browser (needs the CORS settings in elasticsearch.yml)
  es-head:
    container_name: es-head
    image: mobz/elasticsearch-head:5
    restart: always
    ports:
      - "9100:9100"
    depends_on:
      - es-master
      - es-slave1
      - es-slave2
    networks:
      - elknet

  # Cerebro: Elasticsearch monitoring/administration tool
  cerebro:
    container_name: cerebro
    image: lmenezes/cerebro
    restart: always
    ports:
      - "9001:9000"
    command:
      - -Dhosts.0.host=http://es-master:9200
    networks:
      - elknet

  # Curator: index housekeeping (disabled)
  #curator:
  #  container_name: curator
  #  image: elastic/curator
  #  environment:
  #    ELASTICSEARCH_HOST: http://es-master:9200
  #    CRON: "30 0 * * *"  # daily at 00:30, delete indices older than ${UNIT_COUNT} days
  #    CONFIG_FILE: /usr/share/curator/config/curator.yml
  #    COMMAND: /usr/share/curator/config/delete_log_files_curator.yml
  #    UNIT_COUNT: 10
  #  network_mode: "host"
  #  restart: always
  #  depends_on:
  #    - es-master
  #    - es-slave1
  #    - es-slave2

  # Portainer: Docker management UI (disabled)
  #portainer:
  #  container_name: portainer
  #  image: portainer/portainer
  #  ports:
  #    - "9002:9002"
  #  command: -H unix:///var/run/docker.sock
  #  volumes:
  #    - /var/run/docker.sock:/var/run/docker.sock
  #    - ./portainer-data:/data
  #  networks:
  #    - elknet

networks:
  elknet:
    driver: bridge
启动脚本
vim elk.sh
#!/bin/bash
# elk.sh — prepare the directory layout and kernel settings for the Docker
# ELK stack, move the config files into place, then start everything with
# docker-compose. Must be run as root (writes /etc/sysctl.conf and changes
# file ownership).
# (Fixed: shebang was the broken "#./bin/bash".)

# Terminal colour codes
BLUE_COLOR="\033[36m"
RED_COLOR="\033[31m"
GREEN_COLOR="\033[32m"
VIOLET_COLOR="\033[35m"
RES="\033[0m"

echo -e "${BLUE_COLOR}# ######################################################################${RES}"
echo -e "${BLUE_COLOR}# Docker ELK Shell Script #${RES}"
echo -e "${BLUE_COLOR}# Blog: www.voocel.com #${RES}"
echo -e "${BLUE_COLOR}# Email: voocel@gmail.com #${RES}"
echo -e "${BLUE_COLOR}# ######################################################################${RES}"

# Abort unless more than 4 GB of memory is free (three ES nodes + Logstash
# + Kibana need headroom); exit non-zero so callers can detect the failure
if [ "$(free -g|awk '/^Mem/{print $2-$3}')" -le 4 ];then
    echo "可用内存小于4g,退出!"
    exit 1
fi

# Kernel tuning: Elasticsearch requires a large vm.max_map_count
if [ -z "$(grep vm.max_map_count /etc/sysctl.conf)" ];then
    echo 'vm.max_map_count=655360' >>/etc/sysctl.conf
else
    sed -i 's/vm.max_map_count.*/vm.max_map_count=655360/g' /etc/sysctl.conf
fi
sysctl -p

# Create the per-service directory layout (idempotent)
echo -e "${BLUE_COLOR}---> create [elasticsearch]directory start.${RES}"
if [ ! -d "./elasticsearch/" ]; then
    mkdir -p ./elasticsearch/master/conf ./elasticsearch/master/data ./elasticsearch/master/logs \
        ./elasticsearch/slave1/conf ./elasticsearch/slave1/data ./elasticsearch/slave1/logs \
        ./elasticsearch/slave2/conf ./elasticsearch/slave2/data ./elasticsearch/slave2/logs
fi
if [ ! -d "./escerts/" ]; then
    mkdir ./escerts
fi
if [ ! -d "./logs/" ]; then
    mkdir ./logs
fi
echo -e "${RED_COLOR}---> create [kibana]directory start.${RES}"
if [ ! -d "./kibana/" ]; then
    mkdir -p ./kibana/conf ./kibana/logs
fi
echo -e "${GREEN_COLOR}---> create [logstash]directory start.${RES}"
if [ ! -d "./logstash/" ]; then
    mkdir -p ./logstash/conf ./logstash/logs
fi
echo -e "${GREEN_COLOR}---> create [filebeat]directory start.${RES}"
if [ ! -d "./filebeat/" ]; then
    mkdir -p ./filebeat/conf ./filebeat/logs ./filebeat/data
fi
echo -e "${VIOLET_COLOR}---> create [nginx]directory start.${RES}"
if [ ! -d "./nginx/" ]; then
    mkdir -p ./nginx/conf ./nginx/logs ./nginx/www
fi
echo -e "${BLUE_COLOR}===> create directory success.${RES}"

# Grant read/write on the data/log directories (the containers run as
# non-root users and must be able to write into the bind mounts)
echo -e "${BLUE_COLOR}---> directory authorize start.${RES}"
if [ -d "./elasticsearch/" ]; then
    chmod 777 ./elasticsearch/master/data/ ./elasticsearch/master/logs/ \
        ./elasticsearch/slave1/data/ ./elasticsearch/slave1/logs/ \
        ./elasticsearch/slave2/data/ ./elasticsearch/slave2/logs
fi
if [ -d "./escerts/" ]; then
    chmod 777 ./escerts
fi
if [ -d "./filebeat/" ]; then
    chmod 777 ./filebeat/data/ ./filebeat/logs/
fi
echo -e "${BLUE_COLOR}===> directory authorize success.${RES}"

# Move the config files sitting next to this script into their targets
echo -e "${BLUE_COLOR}---> move [elasticsearch]config file start.${RES}"
if [ -f "./es-master.yml" ] && [ -f "./es-slave1.yml" ] && [ -f "./es-slave2.yml" ]; then
    mv ./es-master.yml ./elasticsearch/master/conf
    mv ./es-slave1.yml ./elasticsearch/slave1/conf
    mv ./es-slave2.yml ./elasticsearch/slave2/conf
fi
echo -e "${RED_COLOR}---> move [kibana]config file start.${RES}"
if [ -f "./kibana.yml" ]; then
    mv ./kibana.yml ./kibana/conf
fi
echo -e "${GREEN_COLOR}---> move [logstash]config file start.${RES}"
if [ -f "./logstash-filebeat.conf" ]; then
    mv ./logstash-filebeat.conf ./logstash/conf
fi
if [ -f "./logstash.yml" ]; then
    mv ./logstash.yml ./logstash/conf
    chmod 777 ./logstash/conf/logstash.yml ./logstash/logs
fi
echo -e "${GREEN_COLOR}---> move [filebeat]config file start.${RES}"
if [ -f "./filebeat.yml" ]; then
    mv ./filebeat.yml ./filebeat/conf
    # Filebeat enforces strict config ownership/permissions; fix them at the
    # MOVED location (the original chown targeted the pre-move path, which
    # no longer exists after the mv above)
    chown root:root ./filebeat/conf/filebeat.yml
    chmod 644 ./filebeat/conf/filebeat.yml
fi
echo -e "${VIOLET_COLOR}---> move [nginx]config file start.${RES}"
if [ -f "./nginx.conf" ]; then
    mv ./nginx.conf ./nginx/conf
fi
echo -e "${BLUE_COLOR}===> move config files success.${RES}"
echo -e "${GREEN_COLOR}>>>>>>>>>>>>>>>>>> The End <<<<<<<<<<<<<<<<<<${RES}"

# Build and start the whole stack in the background
echo -e "${BLUE_COLOR}==================> Docker deploy Start <==================${RES}"
docker-compose up --build -d
启动
chmod +x elk.sh
./elk.sh