
Official documentation

Before installing Elasticsearch, it is worth checking the official website first.

This tutorial series is based on Elasticsearch 7.x.

Installing Elasticsearch

https://github.com/pursue-wind/elk-docker-compose
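A minimal sketch of how the repository might be fetched, assuming Docker and Docker Compose are already installed (the directory name simply follows the repository name):

git clone https://github.com/pursue-wind/elk-docker-compose.git
cd elk-docker-compose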

Fixing a startup error on Ubuntu

On Ubuntu, Elasticsearch may fail to start with the error: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]

Fix: 1. Switch to the root user and edit /etc/sysctl.conf:

vi /etc/sysctl.conf

Add the following line:

vm.max_map_count=655360

2. Apply the change by running:

sysctl -p

Then restart Elasticsearch and it should start successfully.
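As a quick check, the value can also be applied to the running kernel without a reboot and then verified; note that sysctl -w alone does not persist across reboots, which is why the entry in /etc/sysctl.conf is still needed:

sudo sysctl -w vm.max_map_count=655360
sysctl vm.max_map_count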

docker-compose.yml

version: '3.3'
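# This stack defines a three-node Elasticsearch cluster (elasticsearch1-3),
# Kibana, several Beats shippers, APM Server, App Search, and an nginx proxy.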
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch1
    environment:
      - node.name=elasticsearch1
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512M -Xmx512M"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=_eth0_
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          cpus: '1'
          memory: 512M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata1
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
      - ingress
    ports:
      - 9200:9200
      - 9300:9300
  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch2
    environment:
      - node.name=elasticsearch2
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512M -Xmx512M"
      - "discovery.zen.ping.unicast.hosts=elasticsearch1"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=_eth0_
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          cpus: '1'
          memory: 512M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata2
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
      - ingress
    ports:
      - 9201:9200
  elasticsearch3:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch3
    environment:
      - node.name=elasticsearch3
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512M -Xmx512M"
      - "discovery.zen.ping.unicast.hosts=elasticsearch1"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=_eth0_
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          cpus: '1'
          memory: 512M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata3
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
      - ingress
    ports:
      - 9202:9200
  kibana:
    image: docker.elastic.co/kibana/kibana:7.8.0
    container_name: kibana
    environment:
      SERVER_NAME: localhost
      ELASTICSEARCH_HOSTS: http://elasticsearch1:9200/
    ports:
      - 5601:5601
    volumes:
      - type: volume
        source: logs
        target: /var/log
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          cpus: '1'
          memory: 512M
      restart_policy:
        condition: on-failure
        delay: 30s
        max_attempts: 3
        window: 120s
    networks:
      - elastic
      - ingress
  auditbeat:
    image: docker.elastic.co/beats/auditbeat:7.8.0
    command: auditbeat -e -strict.perms=false
    user: root
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    cap_add: ['AUDIT_CONTROL', 'AUDIT_READ']
    pid: "host"
    volumes:
    #   - ${PWD}/configs/auditbeat.docker.yml:/usr/share/auditbeat/auditbeat.yml
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - elastic
  metricbeat:
    image: docker.elastic.co/beats/metricbeat:7.8.0
    # command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    cap_add:
      - AUDIT_CONTROL
      - AUDIT_READ
    volumes:
      # - ${PWD}/configs/metricbeat.docker.yml:/usr/share/metricbeat/metricbeat.yml
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro
      - /proc:/hostfs/proc:ro
      - /:/hostfs:ro
    networks:
      - elastic

  heartbeat:
    image: docker.elastic.co/beats/heartbeat:7.8.0
    command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    # volumes:
    #   - ${PWD}/configs/heartbeat.docker.yml:/usr/share/heartbeat/heartbeat.yml
    networks:
      - elastic

  packetbeat:
    image: docker.elastic.co/beats/packetbeat:7.8.0
    command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    cap_add:
      - NET_RAW
      - NET_ADMIN
    # volumes:
    #   - ${PWD}/configs/packetbeat.docker.yml:/usr/share/packetbeat/packetbeat.yml
    networks:
      - elastic

  filebeat:
    image: docker.elastic.co/beats/filebeat:7.8.0
    command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    ports:
      - 9000:9000
    volumes:
      # - ${PWD}/configs/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - elastic
  apmserver:
    image: docker.elastic.co/apm/apm-server:7.8.0
    command: --strict.perms=false
    ports:
      - 8200:8200
      - 8201:8200
    environment:
      - apm-server.host=0.0.0.0
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    # volumes:
    #   - ${PWD}/configs/apm-server.yml:/usr/share/apm-server/apm-server.yml
    networks:
      - elastic
  app-search:
    image: docker.elastic.co/app-search/app-search:7.6.2
    ports:
      - 3002:3002
    environment:
      secret_session_key: supersecretsessionkey
      elasticsearch.host: http://elasticsearch1:9200/
      allow_es_settings_modification: "true"
    networks:
      - elastic
  nginx:
    image: nginx:latest
    ports:
      - 8881:80
    volumes:
      - ${PWD}/nginx-config/:/etc/nginx/conf.d/
    command: /bin/bash -c "nginx -g 'daemon off;'"
    ulimits:
      nproc: 65535
    networks:
      - ingress
volumes:
  esdata1:
  esdata2:
  esdata3:
  logs:

networks:
  elastic:
  ingress:

# configs:
#   auditbeat_config:
#     file: configs/auditbeat.docker.yml
#   filebeat_config:
#     file: configs/filebeat.docker.yml
#   heartbeat_config:
#     file: configs/heartbeat.docker.yml
#   metricbeat_config:
#     file: configs/metricbeat.docker.yml
#   packetbeat_config:
#     file: configs/packetbeat.docker.yml
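
Assuming the file above is saved as docker-compose.yml, the stack can be started and the cluster checked roughly like this (the curl URL uses the 9200 port mapping of elasticsearch1; number_of_nodes should eventually report 3):

docker-compose up -d
curl http://localhost:9200/_cluster/health?pretty

Kibana is then reachable at http://localhost:5601, per the 5601:5601 port mapping above.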

This article is a mix of my own notes and material reposted from the web. If you find any problems with the content, please point them out! Contact me