docker-compose 部署 ES 集群（Deploying a 3-node Elasticsearch cluster with docker-compose）
192.168.10.101
docker-compose.yml
version: '2.2'
services:
  kibana:
    image: docker.elastic.co/kibana/kibana:7.3.0
    container_name: kibana73
    restart: always
    environment:
      - I18N_LOCALE=zh-CN
      - XPACK_GRAPH_ENABLED=true
      - TIMELION_ENABLED=true
      # No literal quotes: in list-form environment entries the quotes become
      # part of the value ('"true"' instead of 'true').
      - XPACK_MONITORING_COLLECTION_ENABLED=true
      - ELASTICSEARCH_HOSTS=["http://192.168.10.101:9200","http://192.168.10.102:9200","http://192.168.10.105:9200"]
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      # Quote port mappings so YAML always reads them as strings.
      - "5601:5601"
    networks:
      - esnet
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
    container_name: es-node1
    restart: always
    environment:
      - node.name=es-node1
      - cluster.name=es-cluster
      # Seed addresses of the OTHER two nodes.
      - discovery.seed_hosts=192.168.10.102,192.168.10.105
      - cluster.initial_master_nodes=es-node1,es-node2,es-node3
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - esdata1:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - esnet
volumes:
  esdata1:
    driver: local
networks:
  esnet:
    driver: bridge
kibana.yml
# Kibana configuration mounted into the container
# (replaces the image's default config).
server.name: kibana
server.host: "0.0.0.0"
# List every cluster node so Kibana can fail over when one node is down
# (listing only one node would make it a single point of failure).
elasticsearch.hosts: ["http://192.168.10.101:9200", "http://192.168.10.102:9200", "http://192.168.10.105:9200"]
xpack.monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.yml
# Elasticsearch node configuration (es-node1, host 192.168.10.101).
#
# Cluster name — must be identical on all three nodes. Kept in sync with
# cluster.name=es-cluster in docker-compose.yml (settings passed through the
# container environment override this file, so a mismatch here is misleading).
cluster.name: es-cluster
# Node name — must be unique per node.
node.name: es-node1
# Eligible to be elected master.
node.master: true
# Stores data.
node.data: true
# Data directory.
path.data: /usr/share/elasticsearch/data
# Log directory.
path.logs: /usr/share/elasticsearch/logs
# Bind to all interfaces; restrict to a specific IP if needed.
network.host: 0.0.0.0
# HTTP (REST) port, default 9200.
http.port: 9200
# Inter-node transport port.
transport.tcp.port: 9300
# Seed hosts: addresses of the other master-eligible nodes.
discovery.seed_hosts: ["192.168.10.102", "192.168.10.105"]
http.cors.enabled: true
http.cors.allow-origin: "*"
# NOTE: discovery.zen.minimum_master_nodes was removed — Elasticsearch 7.x
# ignores it and sizes the voting quorum automatically.
# Master-eligible nodes used only when bootstrapping a brand-new cluster.
cluster.initial_master_nodes: ["es-node1", "es-node2", "es-node3"]
# Address this node publishes to other cluster members (this host's IP).
network.publish_host: 192.168.10.101
192.168.10.102
docker-compose.yml
version: '2.2'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
    container_name: es-node2
    restart: always
    environment:
      - node.name=es-node2
      - cluster.name=es-cluster
      # Seed addresses of the OTHER two nodes.
      - discovery.seed_hosts=192.168.10.101,192.168.10.105
      - cluster.initial_master_nodes=es-node1,es-node2,es-node3
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - esdata2:/usr/share/elasticsearch/data
    ports:
      # Quote port mappings so YAML always reads them as strings.
      - "9200:9200"
      - "9300:9300"
    networks:
      - esnet
volumes:
  esdata2:
    driver: local
networks:
  esnet:
    driver: bridge
elasticsearch.yml
# Elasticsearch node configuration (es-node2, host 192.168.10.102).
#
# Cluster name — must be identical on all three nodes. Kept in sync with
# cluster.name=es-cluster in docker-compose.yml (settings passed through the
# container environment override this file, so a mismatch here is misleading).
cluster.name: es-cluster
# Node name — must be unique per node.
node.name: es-node2
# Eligible to be elected master.
node.master: true
# Stores data.
node.data: true
# Data directory.
path.data: /usr/share/elasticsearch/data
# Log directory.
path.logs: /usr/share/elasticsearch/logs
# Bind to all interfaces; restrict to a specific IP if needed.
network.host: 0.0.0.0
# HTTP (REST) port, default 9200.
http.port: 9200
# Inter-node transport port.
transport.tcp.port: 9300
# Seed hosts: addresses of the other master-eligible nodes.
discovery.seed_hosts: ["192.168.10.101", "192.168.10.105"]
http.cors.enabled: true
http.cors.allow-origin: "*"
# NOTE: discovery.zen.minimum_master_nodes was removed — Elasticsearch 7.x
# ignores it and sizes the voting quorum automatically.
# Master-eligible nodes used only when bootstrapping a brand-new cluster.
cluster.initial_master_nodes: ["es-node1", "es-node2", "es-node3"]
# Address this node publishes to other cluster members (this host's IP).
network.publish_host: 192.168.10.102
192.168.10.105
docker-compose.yml
version: '2.2'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
    container_name: es-node3
    restart: always
    environment:
      - node.name=es-node3
      - cluster.name=es-cluster
      # Seed addresses of the OTHER two nodes.
      - discovery.seed_hosts=192.168.10.101,192.168.10.102
      - cluster.initial_master_nodes=es-node1,es-node2,es-node3
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - esdata3:/usr/share/elasticsearch/data
    ports:
      # Quote port mappings so YAML always reads them as strings.
      - "9200:9200"
      - "9300:9300"
    networks:
      - esnet
volumes:
  esdata3:
    driver: local
networks:
  esnet:
    driver: bridge
elasticsearch.yml
# Elasticsearch node configuration (es-node3, host 192.168.10.105).
#
# Cluster name — must be identical on all three nodes. Kept in sync with
# cluster.name=es-cluster in docker-compose.yml (settings passed through the
# container environment override this file, so a mismatch here is misleading).
cluster.name: es-cluster
# Node name — must be unique per node. FIXED: this file had the copy-paste
# error node.name: es-node2, which duplicates node2's name and conflicts
# with node.name=es-node3 set in this host's docker-compose.yml.
node.name: es-node3
# Eligible to be elected master.
node.master: true
# Stores data.
node.data: true
# Data directory.
path.data: /usr/share/elasticsearch/data
# Log directory.
path.logs: /usr/share/elasticsearch/logs
# Bind to all interfaces; restrict to a specific IP if needed.
network.host: 0.0.0.0
# HTTP (REST) port, default 9200.
http.port: 9200
# Inter-node transport port.
transport.tcp.port: 9300
# Seed hosts: addresses of the other master-eligible nodes.
discovery.seed_hosts: ["192.168.10.101", "192.168.10.102"]
http.cors.enabled: true
http.cors.allow-origin: "*"
# NOTE: discovery.zen.minimum_master_nodes was removed — Elasticsearch 7.x
# ignores it and sizes the voting quorum automatically.
# Master-eligible nodes used only when bootstrapping a brand-new cluster.
cluster.initial_master_nodes: ["es-node1", "es-node2", "es-node3"]
# Address this node publishes to other cluster members (this host's IP).
network.publish_host: 192.168.10.105
Comments | NOTHING