主节点node1:elasticsearch.yml
cluster.name: "elk"
node.name: node1
# Dedicated master: eligible for election, holds no data.
node.master: true
node.data: false
network.bind_host: 0.0.0.0
network.publish_host: 192.168.10.44
http.port: 9201
# NOTE(review): transport.tcp.port is the legacy key; ES 7.x renames it to
# transport.port (old key still accepted) — confirm against the ES version in use.
transport.tcp.port: 9301
http.cors.enabled: true
http.cors.allow-origin: "*"
# All three nodes run on the same host, distinguished by transport port.
discovery.seed_hosts: ["192.168.10.44:9301", "192.168.10.44:9302", "192.168.10.44:9303"]
cluster.initial_master_nodes: ["node1"]
xpack.monitoring.collection.enabled: true

数据节点node2:elasticsearch.yml
cluster.name: "elk"
node.name: node2
# Data-only node: not master-eligible.
node.master: false
node.data: true
network.bind_host: 0.0.0.0
network.publish_host: 192.168.10.44
http.port: 9202
# NOTE(review): transport.tcp.port is the legacy key; ES 7.x renames it to
# transport.port (old key still accepted) — confirm against the ES version in use.
transport.tcp.port: 9302
http.cors.enabled: true
http.cors.allow-origin: "*"
# All three nodes run on the same host, distinguished by transport port.
discovery.seed_hosts: ["192.168.10.44:9301", "192.168.10.44:9302", "192.168.10.44:9303"]
cluster.initial_master_nodes: ["node1"]
xpack.monitoring.collection.enabled: true

数据节点node3:elasticsearch.yml
cluster.name: "elk"
node.name: node3
# Data-only node: not master-eligible.
node.master: false
node.data: true
network.bind_host: 0.0.0.0
network.publish_host: 192.168.10.44
http.port: 9203
# NOTE(review): transport.tcp.port is the legacy key; ES 7.x renames it to
# transport.port (old key still accepted) — confirm against the ES version in use.
transport.tcp.port: 9303
http.cors.enabled: true
http.cors.allow-origin: "*"
# All three nodes run on the same host, distinguished by transport port.
discovery.seed_hosts: ["192.168.10.44:9301", "192.168.10.44:9302", "192.168.10.44:9303"]
cluster.initial_master_nodes: ["node1"]
xpack.monitoring.collection.enabled: true

status状态为green,视为集群搭建成功
curl 192.168.10.44:9201/_cluster/health

{"cluster_name":"elk","status":"green","timed_out":false,"number_of_nodes":3,"number_of_data_nodes":2,"active_primary_shards":9,"active_shards":18,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":0,"delayed_unassigned_shards":0,"number_of_pending_tasks":0,"number_of_in_flight_fetch":0,"task_max_waiting_in_queue_millis":0,"active_shards_percent_as_number":100.0}

kibana.yml
server.port: 5601
server.host: "0.0.0.0"
# Kibana load-balances across all three ES HTTP endpoints.
elasticsearch.hosts: ["http://192.168.10.44:9201", "http://192.168.10.44:9202", "http://192.168.10.44:9203"]

http://192.168.10.44:5601/ 点击Dev tools,查看集群健康
redis.conf
bind 0.0.0.0
daemonize no
pidfile "/var/run/redis.pid"
port 6380
timeout 300
loglevel warning
logfile "redis.log"
databases 16
rdbcompression yes
dbfilename "redis.rdb"
dir "/data"
# NOTE(review): "123456" is a trivially guessable password and the server
# binds on 0.0.0.0 — replace with a strong secret before exposing this port.
requirepass "123456"
masterauth "123456"
maxclients 10000
maxmemory 1000mb
# Evict least-recently-used keys across all keys when maxmemory is reached.
maxmemory-policy allkeys-lru
appendonly yes
appendfsync always

logstash.yml
http.host: "0.0.0.0"
# Ship Logstash's own monitoring metrics to the ES cluster.
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.hosts: ["http://192.168.10.44:9201", "http://192.168.10.44:9202", "http://192.168.10.44:9203"]

pipelines.yml
# Single pipeline: reads the docker log pipeline definition below.
- pipeline.id: docker
  path.config: "/usr/share/logstash/pipeline/docker.conf"

docker.conf
# Pull Filebeat events queued in Redis (list "localhost") and index the
# docker-tagged ones into a daily Elasticsearch index.
input {
  redis {
    host => "192.168.10.44"
    port => 6380
    db => 0
    key => "localhost"
    password => "123456"
    data_type => "list"
    threads => 4
    tags => "localhost"
  }
}

output {
  if "localhost" in [tags] {
    # [fields][function] is set by Filebeat (fields.function: docker).
    if [fields][function] == "docker" {
      elasticsearch {
        hosts => ["192.168.10.44:9201","192.168.10.44:9202","192.168.10.44:9203"]
        index => "docker-localhost-%{+YYYY.MM.dd}"
      }
    }
  }
}

filebeat.yml
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

filebeat.inputs:
  # Collect container stdout/stderr logs straight from the Docker host.
  - type: docker
    enabled: true
    combine_partial: true
    containers:
      path: "/var/lib/docker/containers"
      # '*' = all containers; quoted so YAML does not read it as an alias.
      ids:
        - '*'
    processors:
      - add_docker_metadata: ~
    encoding: utf-8
    max_bytes: 104857600
    tail_files: true
    # Routing field consumed by the Logstash pipeline ([fields][function]).
    fields:
      function: docker

#processors:
#  - add_host_metadata: ~
#  - add_cloud_metadata: ~

# Events are buffered in Redis; Logstash drains the same key/db.
output.redis:
  hosts: ["192.168.10.44:6380"]
  password: "123456"
  db: 0
  key: "localhost"
  worker: 4
  timeout: 5
  max_retries: 3
  codec.json:
    pretty: false

monitoring.enabled: true
monitoring.elasticsearch:
  hosts: ["http://192.168.10.44:9201", "http://192.168.10.44:9202", "http://192.168.10.44:9203"]

点击Management,再点击Kibana下面的Index Patterns,然后Create index pattern
点击Discover