Add support for cluster monitoring operator with persistent storage #59

Status: Open. Wants to merge 1 commit into base: master.
1 change: 1 addition & 0 deletions README.md
@@ -40,6 +40,7 @@ the larger openshift-ansible inventory file for your deployment.
3. Storage for Applications + Metrics
4. Storage for Applications + Registry + Logging + Metrics
5. Storage for Applications Only
6. Storage for Applications + Registry + Logging + Metrics + Cluster-Monitoring-Operator
------------------------------------------------------------
Enter your choice [1-5] : 4
------------------------------------------------------------
151 changes: 151 additions & 0 deletions openshift_cic/cic.py
@@ -44,6 +44,7 @@ def perform_operation():
print ("3. Storage for Applications + Metrics ")
print ("4. Storage for Applications + Registry + Logging + Metrics")
print ("5. Storage for Applications Only")
print ("6. Storage for Applications + Registry + Logging + Metrics + Cluster-Monitoring-Operator")
print (60 * '-')

is_valid=0
@@ -526,7 +527,157 @@ def perform_operation():
                                  raw_storage_size=raw_storage_size)
    #Print the output
    print(output)
elif choice == 6:
    print (60 * '-')
    print ("For this configuration, 7 nodes are recommended")
    print ("with a minimum of 3 required")
    print (60 * '-')
    avail_hosts = int(raw_input("How many nodes are available ?: "))

    if avail_hosts >= 6:
        app_hosts = raw_input("What hosts will be used for application storage (IP/FQDN) ?: ")
        app_hosts = utils.check_input(app_hosts)
        utils.min_hosts(app_hosts)
        utils.host_not_valid(app_hosts)
        raw_devices = raw_input("What are the raw storage devices for these hosts (/dev/<device>) ?: ")
        raw_devices = utils.check_input(raw_devices)
        raw_storage_size = int(raw_input("What is the size of each raw storage device (GB) ?: "))
        registry_pvsize = int(raw_input("What is the size for the registry persistent volume (GB) ?: "))
        replica_log = int(raw_input("How many replicas for logging ?: "))
        logging_pvsize = int(raw_input("What is the size for each logging persistent volume (GB) ?: "))
        prom_pvsize = int(raw_input("What is the size for each prometheus persistent volume (GB) ?: "))
        prom_log = 2
        alert_pvsize = int(raw_input("What is the size for each alertmanager persistent volume (GB) ?: "))
        alert_log = 3
        replica_metrics = 1
        metrics_pvsize = int(raw_input("What is the size for each metrics persistent volume (GB) ?: "))
        met_log_hosts = raw_input("What hosts will be used for CNS logging + metrics backend storage (IP/FQDN) ?: ")
        met_log_hosts = utils.check_input(met_log_hosts)
        utils.both_in_use(met_log_hosts, app_hosts)
        met_log_devices = raw_input("What are the raw storage devices for logging + metrics backend on these hosts (/dev/<device>) ?: ")
        met_log_devices = utils.check_input(met_log_devices)
        met_log_storage_size = int(raw_input("What is the size of each raw storage device (GB) ?: "))
        zone = [1,2,3]

        # gluster-block host volume: combined infra PV sizes (logging, metrics,
        # prometheus, alertmanager) plus 30% extra
        min_block_host_vol_size = (logging_pvsize * replica_log) + (replica_metrics * metrics_pvsize) + (alert_log * alert_pvsize) + (prom_log * prom_pvsize)
        block_host_size = int(min_block_host_vol_size + (30 / 100.0) * min_block_host_vol_size)
        # Cluster 1: total raw storage across the application hosts
        cluster_storage = len(raw_devices) * raw_storage_size * len(app_hosts)
        # Usable application storage with replica-3
        total_avail_store = cluster_storage / 3.0
        # Cluster 2: usable infra (registry/logging/metrics/monitoring) storage with replica-3
        total_reg_store = (met_log_storage_size * len(met_log_devices) * len(met_log_hosts)) / 3.0

        block_calc = registry_pvsize + block_host_size
        totalalloc = block_calc

        print "# Cluster 1"
        print "# Total Storage allocated (GB) = 0"
        print "# Total Storage available (GB) = %d" % total_avail_store
        print " "
        print "# Cluster 2"
        print "# Total Storage allocated (GB) = %d" % totalalloc
        print "# Total Storage available (GB) = %d" % total_reg_store

        if registry_pvsize > met_log_storage_size and totalalloc < total_reg_store:
            print "\033[91mWarning: one or more persistent volumes are"
            print "larger than the raw storage device size\033[0m"
            print " "
            exit()

        elif registry_pvsize < met_log_storage_size and totalalloc > total_reg_store:
            print "\033[91mWarning: the Total Storage available is less"
            print "than the Total Storage allocated\033[0m"
            exit()

        elif registry_pvsize > met_log_storage_size and totalalloc > total_reg_store:
            print "\033[91mWarning: one or more persistent volumes are"
            print "larger than the raw storage device size"
            print " "
            print "Warning: the Total Storage available is less"
            print "than the Total Storage allocated\033[0m"
            exit()
        print " "
        template = utils.get_template_input(ocpver, 'applogmetclus-multi.j2')
        output = template.render(ver=ver, app_hosts=app_hosts,
                                  raw_devices=json.dumps(raw_devices),
                                  raw_storage_size=raw_storage_size,
                                  block_host_size=block_host_size,
                                  registry_pvsize=registry_pvsize,
                                  logging_pvsize=logging_pvsize,
                                  replica_log=replica_log,
                                  met_log_hosts=met_log_hosts,
                                  metrics_pvsize=metrics_pvsize,
                                  prom_pvsize=prom_pvsize,
                                  alert_pvsize=alert_pvsize,
                                  met_log_devices=met_log_devices)
        print(output)
    else:
        app_hosts = raw_input("What hosts will be used for application storage (IP/FQDN) ?: ")
        app_hosts = utils.check_input(app_hosts)
        utils.min_hosts(app_hosts)
        utils.host_not_valid(app_hosts)
        raw_devices = raw_input("What are the raw storage devices for these hosts (/dev/<device>) ?: ")
        raw_devices = utils.check_input(raw_devices)
        raw_storage_size = int(raw_input("What is the size of each raw storage device (GB) ?: "))
        registry_pvsize = int(raw_input("What is the size for the registry persistent volume (GB) ?: "))
        replica_log = int(raw_input("How many replicas for logging ?: "))
        logging_pvsize = int(raw_input("What is the size for each logging persistent volume (GB) ?: "))
        prom_pvsize = int(raw_input("What is the size for each prometheus persistent volume (GB) ?: "))
        prom_log = 2
        alert_pvsize = int(raw_input("What is the size for each alertmanager persistent volume (GB) ?: "))
        alert_log = 3
        replica_metrics = 1
        metrics_pvsize = int(raw_input("What is the size for each metrics persistent volume (GB) ?: "))
        zone = [1,2,3]

        # gluster-block host volume: combined infra PV sizes (logging, metrics,
        # prometheus, alertmanager) plus 30% extra
        min_block_host_vol_size = (logging_pvsize * replica_log) + (replica_metrics * metrics_pvsize) + (alert_log * alert_pvsize) + (prom_log * prom_pvsize)
        block_host_size = int(min_block_host_vol_size + (30 / 100.0) * min_block_host_vol_size)

        # Single-cluster total storage calculation (usable capacity with replica-3)
        cluster_storage = len(raw_devices) * raw_storage_size * len(app_hosts)
        total_avail_store = cluster_storage / 3.0
        block_calc = registry_pvsize + block_host_size
        totalcalc = block_calc

        print "# Cluster 1"
        print "# Total Storage allocated (GB) = %d" % block_calc
        print "# Total Storage available (GB) = %d" % total_avail_store

        if block_calc > total_avail_store and registry_pvsize < total_avail_store:
            print "\033[91mWarning: the Total Storage available is less"
            print "than the Total Storage allocated\033[0m"
            exit()

        elif block_calc < total_avail_store and registry_pvsize > total_avail_store:
            print "\033[91mWarning: one or more persistent volumes are"
            print "larger than the raw storage device size\033[0m"
            print " "
            exit()

        elif block_calc > total_avail_store and registry_pvsize > total_avail_store:
            print "\033[91mWarning: one or more persistent volumes are"
            print "larger than the raw storage device size"
            print " "
            print "Warning: the Total Storage available is less"
            print "than the Total Storage allocated\033[0m"
            exit()
        print " "

        template = utils.get_template_input(ocpver, 'applogmetclus.j2')
        output = template.render(ver=ver, app_hosts=app_hosts,
                                  raw_devices=json.dumps(raw_devices),
                                  raw_storage_size=raw_storage_size,
                                  block_host_size=block_host_size,
                                  registry_pvsize=registry_pvsize,
                                  logging_pvsize=logging_pvsize,
                                  metrics_pvsize=metrics_pvsize,
                                  prom_pvsize=prom_pvsize,
                                  alert_pvsize=alert_pvsize,
                                  replica_log=replica_log)
        print(output)
else:
    print ("Invalid number. Try again...")

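For a concrete feel for the sizing arithmetic in the new branch, here is a small worked sketch with hypothetical inputs (3 x 50 GB logging PVs, one 25 GB metrics PV, 3 x 10 GB Alertmanager PVs, 2 x 40 GB Prometheus PVs, and three application hosts with one 500 GB device each); it only reproduces the formulas above and is not part of cic.py:

# Worked example of the sizing math in the choice == 6 branch, hypothetical inputs.
logging_pvsize, replica_log = 50, 3        # 3 x 50 GB Elasticsearch PVs
metrics_pvsize, replica_metrics = 25, 1    # 1 x 25 GB Cassandra PV
alert_pvsize, alert_log = 10, 3            # 3 x 10 GB Alertmanager PVs
prom_pvsize, prom_log = 40, 2              # 2 x 40 GB Prometheus PVs

# Combined infra PV demand, then 30% extra for the gluster-block host volume
min_block_host_vol_size = ((logging_pvsize * replica_log)
                           + (replica_metrics * metrics_pvsize)
                           + (alert_log * alert_pvsize)
                           + (prom_log * prom_pvsize))
block_host_size = int(min_block_host_vol_size + (30 / 100.0) * min_block_host_vol_size)

# Usable capacity of a replica-3 Gluster cluster: raw capacity divided by 3
raw_devices, raw_storage_size, app_host_count = ["/dev/sdc"], 500, 3
total_avail_store = (len(raw_devices) * raw_storage_size * app_host_count) / 3.0

print("block host volume size (GB): %d" % block_host_size)            # 370
print("usable app storage, replica-3 (GB): %d" % total_avail_store)   # 500
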
74 changes: 74 additions & 0 deletions openshift_cic/templates/311/applogmetclus-multi.j2
@@ -0,0 +1,74 @@
[OSEv3:children]
glusterfs
glusterfs_registry

[OSEv3:vars]
# registry
openshift_hosted_registry_storage_kind=glusterfs
openshift_hosted_registry_storage_volume_size={{registry_pvsize}}Gi
openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true"

# logging
openshift_logging_install_logging=true
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size={{logging_pvsize}}Gi
openshift_logging_es_cluster_size={{replica_log}}
openshift_logging_es_pvc_storage_class_name='glusterfs-registry-block'
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}

# metrics
openshift_metrics_install_metrics=true
openshift_metrics_storage_kind=dynamic
openshift_metrics_storage_volume_size={{metrics_pvsize}}Gi
openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-registry-block'
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}

# Cluster Monitoring
openshift_cluster_monitoring_operator_prometheus_storage_enabled=true
openshift_cluster_monitoring_operator_prometheus_storage_capacity={{prom_pvsize}}Gi
openshift_cluster_monitoring_operator_prometheus_storage_class_name="glusterfs-registry-block"
openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true
openshift_cluster_monitoring_operator_alertmanager_storage_class_name="glusterfs-registry-block"
openshift_cluster_monitoring_operator_alertmanager_storage_capacity={{alert_pvsize}}Gi

# Container image to use for glusterfs pods
openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:v{{ver}}"

# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:v{{ver}}"

# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:v{{ver}}"

# CNS storage cluster
openshift_storage_glusterfs_namespace=app-storage
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=false
openshift_storage_glusterfs_block_deploy=false
openshift_storage_glusterfs_block_host_vol_create=false
openshift_storage_glusterfs_block_host_vol_size=100
openshift_storage_glusterfs_block_storageclass=false
openshift_storage_glusterfs_block_storageclass_default=false

# CNS storage for OpenShift infrastructure
openshift_storage_glusterfs_registry_namespace=infra-storage
openshift_storage_glusterfs_registry_storageclass=false
openshift_storage_glusterfs_registry_block_deploy=true
openshift_storage_glusterfs_registry_block_host_vol_create=true
openshift_storage_glusterfs_registry_block_host_vol_size={{block_host_size}}
openshift_storage_glusterfs_registry_block_storageclass=true
openshift_storage_glusterfs_registry_block_storageclass_default=false

[glusterfs]
{% for app in app_hosts -%}
{{app}} glusterfs_zone={{loop.cycle(1,2,3)}} glusterfs_devices='{{ raw_devices }}'
{% endfor %}

[glusterfs_registry]
{% for z in met_log_hosts -%}
{{z}} glusterfs_zone={{loop.cycle(1,2,3)}} glusterfs_devices='{{ met_log_devices }}'
{% endfor -%}
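
To see what the host sections of this template produce, here is a small sketch that renders just the [glusterfs] loop with the jinja2 library; the host names and devices are made up, and the zone numbers come from loop.cycle(1,2,3) exactly as in the template:

# Illustrative only: render the [glusterfs] section with hypothetical hosts/devices.
import json
from jinja2 import Template

snippet = Template(
    "[glusterfs]\n"
    "{% for app in app_hosts -%}\n"
    "{{app}} glusterfs_zone={{loop.cycle(1,2,3)}} glusterfs_devices='{{ raw_devices }}'\n"
    "{% endfor %}"
)

print(snippet.render(
    app_hosts=["node1.example.com", "node2.example.com", "node3.example.com"],
    raw_devices=json.dumps(["/dev/sdc", "/dev/sdd"]),   # cic.py passes json.dumps(raw_devices)
))
# [glusterfs]
# node1.example.com glusterfs_zone=1 glusterfs_devices='["/dev/sdc", "/dev/sdd"]'
# node2.example.com glusterfs_zone=2 glusterfs_devices='["/dev/sdc", "/dev/sdd"]'
# node3.example.com glusterfs_zone=3 glusterfs_devices='["/dev/sdc", "/dev/sdd"]'
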
62 changes: 62 additions & 0 deletions openshift_cic/templates/311/applogmetclus.j2
@@ -0,0 +1,62 @@

[OSEv3:children]
glusterfs

[OSEv3:vars]
# registry
openshift_hosted_registry_storage_kind=glusterfs
openshift_hosted_registry_storage_volume_size={{registry_pvsize}}Gi
openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true"

# logging
openshift_logging_install_logging=true
openshift_logging_es_pvc_dynamic=true
openshift_logging_es_pvc_size={{logging_pvsize}}Gi
openshift_logging_es_cluster_size={{replica_log}}
openshift_logging_es_pvc_storage_class_name='glusterfs-storage-block'
openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"}

# metrics
openshift_metrics_install_metrics=true
openshift_metrics_storage_kind=dynamic
openshift_metrics_storage_volume_size={{metrics_pvsize}}Gi
openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-storage-block'
openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"}
openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"}

# Cluster Monitoring
openshift_cluster_monitoring_operator_prometheus_storage_enabled=true
openshift_cluster_monitoring_operator_prometheus_storage_capacity={{prom_pvsize}}Gi
openshift_cluster_monitoring_operator_prometheus_storage_class_name="glusterfs-storage-block"
openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true
openshift_cluster_monitoring_operator_alertmanager_storage_class_name="glusterfs-storage-block"
openshift_cluster_monitoring_operator_alertmanager_storage_capacity={{alert_pvsize}}Gi

# Container image to use for glusterfs pods
openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:v{{ver}}"

# Container image to use for glusterblock-provisioner pod
openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:v{{ver}}"

# Container image to use for heketi pods
openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:v{{ver}}"

# CNS storage cluster
openshift_storage_glusterfs_namespace=app-storage
openshift_storage_glusterfs_storageclass=true
openshift_storage_glusterfs_storageclass_default=false
openshift_storage_glusterfs_block_deploy=true
openshift_storage_glusterfs_block_host_vol_create=true
openshift_storage_glusterfs_block_host_vol_size={{block_host_size}}
openshift_storage_glusterfs_block_storageclass=true
openshift_storage_glusterfs_block_storageclass_default=false


[glusterfs]
{% for app in app_hosts -%}
{{app}} glusterfs_zone={{loop.cycle(1,2,3)}} glusterfs_devices='{{ raw_devices }}'
{% endfor -%}
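
Tying the pieces together, a minimal sketch (hypothetical sizes, reusing the 370 GB block host volume from the worked example after cic.py) of how the sizing variables land as plain key=value lines in the rendered inventory, which is the form openshift-ansible expects in an INI inventory:

# Illustrative only: render a few inventory lines with hypothetical values.
from jinja2 import Template

lines = Template(
    'openshift_storage_glusterfs_block_host_vol_size={{block_host_size}}\n'
    'openshift_cluster_monitoring_operator_prometheus_storage_capacity={{prom_pvsize}}Gi\n'
    'openshift_cluster_monitoring_operator_alertmanager_storage_capacity={{alert_pvsize}}Gi'
)
print(lines.render(block_host_size=370, prom_pvsize=40, alert_pvsize=10))
# openshift_storage_glusterfs_block_host_vol_size=370
# openshift_cluster_monitoring_operator_prometheus_storage_capacity=40Gi
# openshift_cluster_monitoring_operator_alertmanager_storage_capacity=10Gi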