; hosts.large_sample — sample Ansible inventory (INI format).
; Defines a "production" environment composed of Hadoop, Kafka/Confluent,
; management, and monitoring host groups.
; Top group to define what kind of environment this is.
; Examples of the group name are "production", "test" and so on.
[production:children]
local
hadoop_all
hadoop_pseudo
kafka_cluster
confluent_kafka_cluster
manage
data_loader
endosnipe
heapstats
; Dummy group containing only the local host.
[local]
localhost
; Top group of all nodes of each component
; This group includes parent groups of each component
[hadoop_all:children]
hadoop_master
hadoop_client
hadoop_slave
hadoop_hbase
hadoop_spark
; Parent group of master nodes
[hadoop_master:children]
hadoop_namenode
hadoop_journalnode
hadoop_zookeeperserver
hadoop_resourcemanager
hadoop_other
hadoop_httpfs
; All of NameNodes
[hadoop_namenode:children]
hadoop_namenode_primary
hadoop_namenode_backup
; Primary NameNode
; This group should have only one node.
[hadoop_namenode_primary]
master01
; Backup NameNode
; This group should have only one node.
[hadoop_namenode_backup]
master02
; All of JournalNodes
[hadoop_journalnode]
master05
master06
master07
; All of Zookeeper nodes
; Each node has a parameter configuration about Zookeeper ID
[hadoop_zookeeperserver]
master05 zookeeper_server_id=1
master06 zookeeper_server_id=2
master07 zookeeper_server_id=3
; All of ResourceManagers
[hadoop_resourcemanager]
master03
master04
; Node to provide misc services
[hadoop_other]
master08
; SlaveNodes
[hadoop_slave]
slave01
slave02
slave03
slave04
slave05
slave06
slave07
slave08
slave09
slave10
; Used as a client of Hadoop
[hadoop_client:children]
hadoop_hive
hadoop_oozie
hadoop_pig
[hadoop_hive]
client01
[hadoop_oozie]
client01
[hadoop_pig]
client01
; Spark Standalone cluster
; Sorry, Spark Standalone configuration is not implemented yet.
[hadoop_spark:children]
hadoop_spark_master
hadoop_spark_worker
[hadoop_spark_master]
master01
[hadoop_spark_worker]
slave01
slave02
slave03
slave04
slave05
slave06
slave07
slave08
slave09
slave10
; Node to provide HttpFS service
[hadoop_httpfs]
master08
; Top group of HBase cluster
; Sorry, HBase configuration is not implemented yet.
[hadoop_hbase:children]
hadoop_hbase_master
hadoop_hbase_regionserver
[hadoop_hbase_master]
master01
[hadoop_hbase_regionserver]
slave01
slave02
slave03
slave04
slave05
[hadoop_pseudo]
pseudo
[kafka_cluster]
kafka01 zookeeper_server_id=1 kafka_broker_id=1
kafka02 zookeeper_server_id=2 kafka_broker_id=2
kafka03 zookeeper_server_id=3 kafka_broker_id=3
[confluent_kafka_cluster]
kafka01 zookeeper_server_id=1 confluent_kafka_broker_id=1
kafka02 zookeeper_server_id=2 confluent_kafka_broker_id=2
kafka03 zookeeper_server_id=3 confluent_kafka_broker_id=3
; These servers should be different from the Kafka brokers
; because of separation of workloads.
[confluent_kafka_connect]
kafka01
kafka02
kafka03
[confluent_schema_registry]
kafka01
[confluent_kafka_rest]
kafka01
[manage]
manage
[data_loader]
loader01
[endosnipe:children]
endo_javelin
endo_dashboard
[endo_javelin]
slave01
slave02
slave03
slave04
slave05
[endo_dashboard]
manage
[heapstats]
slave01
slave02
slave03
slave04
slave05