-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yml
171 lines (156 loc) · 4.08 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
# Compose file format 3.7 (requires Docker Engine 18.06.0+).
version: "3.7"
# Service topology: Flask app behind Nginx, PostgreSQL, and a 3-node
# Elasticsearch cluster with Kibana and Logstash, all on one shared network.
services:
### [WAS] ###
  # Flask application server, built from ./flask-web; published on port 5000.
  flask:
    build: ./flask-web
    container_name: flask
    hostname: flask
    restart: always
    depends_on:
      - postgres
    ports:
      - "5000:5000"
    volumes:
      # App log directory, bind-mounted so Logstash (and the host) can read it.
      - ./flask-web/log:/tmp/logs
    networks:
      - hadoop
# Nginx reverse proxy in front of the Flask app, published on port 8080.
  # NOTE: the service key was previously misspelled "ngnix"; renamed to
  # "nginx" (nothing in this file referenced the old name via depends_on
  # or links, so no other stanza needs to change).
  nginx:
    build: ./nginx
    container_name: nginx
    restart: always
    ports:
      - "8080:8080"
    volumes:
      # Nginx access/error logs, bind-mounted for the logstash service to tail.
      - ./nginx/log:/var/log/nginx
    networks:
      - hadoop
### [DB] ###
  # PostgreSQL database used by the Flask app (see flask.depends_on).
  postgres:
    # NOTE(review): image tag is unpinned ("latest") — consider pinning a
    # major version for reproducible environments.
    image: postgres
    hostname: postgres
    container_name: postgres
    restart: always
    environment:
      # NOTE(review): hard-coded credentials — acceptable for local dev only;
      # move to an env file or secret store for anything shared.
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: simple
    ports:
      # Quoted: Compose requires port mappings as strings so YAML never
      # misparses "HOST:CONTAINER" as a number.
      - "5432:5432"
    volumes:
      # Named volume so database data survives container recreation.
      - postgres_data:/var/lib/postgresql/data
    networks:
      - hadoop
### [ELK] ###
  # Elasticsearch master-eligible node 1 of 3 — the only node published to
  # the host (9200) and the endpoint Kibana/Logstash point at.
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    container_name: es01
    restart: always
    environment:
      node.name: es01
      cluster.name: es-docker-cluster
      discovery.seed_hosts: es02,es03
      cluster.initial_master_nodes: es01,es02,es03
      # Quoted so the value stays a string env var, not a YAML boolean.
      bootstrap.memory_lock: "true"
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      xpack.security.enabled: "false"
      xpack.monitoring.enabled: "false"
    ulimits:  # per-process resource limits
      # memlock: max locked-in-memory address space
      # (soft: default applied value; hard: ceiling soft may be raised to).
      # -1 = unlimited — required when bootstrap.memory_lock is enabled.
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      # Quoted to keep the "HOST:CONTAINER" mapping a plain string.
      - "9200:9200"
    networks:
      - hadoop
    healthcheck:
      test: ["CMD", "curl", "-s", "-f", "http://localhost:9200/_cat/health"]
      interval: 30s
      start_period: 60s
# Elasticsearch master-eligible node 2 of 3 (cluster-internal only; no
  # host-published ports).
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    container_name: es02
    restart: always
    # List form of environment — equivalent to the mapping form.
    environment:
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
      - xpack.security.enabled=false
      - xpack.monitoring.enabled=false
    ulimits:
      # Unlimited locked memory, needed for bootstrap.memory_lock.
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - hadoop
# Elasticsearch master-eligible node 3 of 3 (cluster-internal only; no
  # host-published ports).
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    container_name: es03
    restart: always
    volumes:
      - data03:/usr/share/elasticsearch/data
    networks:
      - hadoop
    ulimits:
      # Unlimited locked memory, needed for bootstrap.memory_lock.
      memlock:
        soft: -1
        hard: -1
    environment:
      node.name: es03
      cluster.name: es-docker-cluster
      discovery.seed_hosts: es01,es02
      cluster.initial_master_nodes: es01,es02,es03
      bootstrap.memory_lock: "true"
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      xpack.security.enabled: "false"
      xpack.monitoring.enabled: "false"
# Kibana UI, backed by the three-node Elasticsearch cluster.
  kib01:
    image: docker.elastic.co/kibana/kibana:7.13.2
    container_name: kib01
    # Added for consistency — every other service in this file restarts.
    restart: always
    ports:
      # Quoted to keep the "HOST:CONTAINER" mapping a plain string.
      - "5601:5601"
    environment:
      ELASTICSEARCH_URL: http://es01:9200
      ELASTICSEARCH_HOSTS: '["http://es01:9200","http://es02:9200","http://es03:9200"]'
    networks:
      - hadoop
    healthcheck:
      # Probe Kibana itself via its status API. The previous check hit
      # http://es01:5601/_cat/health — es01 does not listen on 5601 and
      # _cat/health is an Elasticsearch API, so that check could never pass.
      test: ["CMD", "curl", "-s", "-f", "http://localhost:5601/api/status"]
      interval: 30s
      start_period: 60s
# Logstash: tails the Nginx and Flask log mounts and ships them to
  # Elasticsearch; also listens on 5001 (tcp/udp) for direct inputs.
  logstash:
    image: docker.elastic.co/logstash/logstash:7.13.2
    container_name: logstash
    restart: always
    ports:
      - "5001:5001/tcp"
      - "5001:5001/udp"
      - "9600:9600"
    environment:
      ES_JAVA_OPTS: -Xms512m -Xmx512m
    volumes:
      - ./elk/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - ./elk/logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml
      - ./elk/logstash/pipeline/:/usr/share/logstash/pipeline/
      # Read-side mounts: the same host directories the nginx and flask
      # services write their logs into.
      - ./nginx/log:/var/log/nginx
      - ./flask-web/log:/var/log/flask-web
    networks:
      - hadoop
    healthcheck:
      # Probe Logstash's own monitoring API (port 9600). The previous check
      # targeted es01 with the Elasticsearch-only _cat/health path, which
      # tested the wrong service.
      test: ["CMD", "curl", "-s", "-f", "http://localhost:9600"]
      interval: 30s
      start_period: 60s
# Named volumes: persist each Elasticsearch node's data and PostgreSQL data
# across container recreation. Bare keys = default local driver.
volumes:
  data01:
  data02:
  data03:
  postgres_data:
# Single shared network joined by every service; the explicit name keeps it
# "hadoop" instead of the project-prefixed default, so external tooling can
# attach to it predictably.
networks:
  hadoop:
    name: hadoop