From fe4314da44e7565760b30b0fe15b30236deda5ee Mon Sep 17 00:00:00 2001
From: JoshVanL
Date: Thu, 21 Feb 2019 15:42:23 +0000
Subject: [PATCH] Ensure Kubernetes resources are absent when service is disabled

Signed-off-by: JoshVanL
---
 Gopkg.lock | 1 +
 pkg/puppet/puppet.go | 113 +++++++++++-------
 .../aws_es_proxy/manifests/instance.pp | 17 ++-
 puppet/modules/calico/manifests/config.pp | 2 +
 .../disable_source_destination_check.pp | 1 +
 puppet/modules/calico/manifests/node.pp | 1 +
 .../calico/manifests/policy_controller.pp | 14 ++-
 .../calico/spec/classes/config_spec.rb | 7 +-
 .../modules/calico/spec/classes/init_spec.rb | 14 ++-
 .../modules/calico/spec/classes/node_spec.rb | 7 +-
 .../spec/classes/policy_controller_spec.rb | 7 +-
 puppet/modules/fluent_bit/manifests/config.pp | 2 +-
 .../modules/fluent_bit/manifests/daemonset.pp | 1 +
 puppet/modules/fluent_bit/manifests/init.pp | 1 +
 puppet/modules/fluent_bit/manifests/output.pp | 11 +-
 .../modules/fluent_bit/manifests/service.pp | 11 +-
 .../fluent_bit/spec/classes/daemonset_spec.rb | 7 +-
 .../fluent_bit/spec/classes/init_spec.rb | 14 ++-
 .../fluent_bit/spec/defines/output_spec.rb | 7 ++
 puppet/modules/kubernetes/manifests/apply.pp | 11 +-
 .../kubernetes/manifests/apply_fragment.pp | 11 +-
 puppet/modules/kubernetes/manifests/delete.pp | 12 --
 puppet/modules/kubernetes/manifests/dns.pp | 6 +-
 .../manifests/pod_security_policy.pp | 17 ++-
 puppet/modules/kubernetes/manifests/rbac.pp | 23 ++--
 .../kubernetes/manifests/storage_classes.pp | 1 +
 .../kubernetes/spec/classes/dns_spec.rb | 12 +-
 .../spec/classes/pod_security_policy_spec.rb | 31 +++++
 .../kubernetes/spec/classes/rbac_spec.rb | 8 +-
 .../kubernetes/spec/defines/apply_spec.rb | 10 ++
 .../manifests/cluster_autoscaler.pp | 19 ++-
 .../kubernetes_addons/manifests/dashboard.pp | 2 +
 .../manifests/default_backend.pp | 2 +
 .../manifests/elasticsearch.pp | 2 +
 .../manifests/fluentd_elasticsearch.pp | 2 +
 .../kubernetes_addons/manifests/grafana.pp | 17 ++-
 .../kubernetes_addons/manifests/heapster.pp | 19 ++-
 .../kubernetes_addons/manifests/influxdb.pp | 17 ++-
 .../kubernetes_addons/manifests/kibana.pp | 2 +
 .../kubernetes_addons/manifests/kube2iam.pp | 2 +
 .../manifests/metrics_server.pp | 17 ++-
 .../manifests/nginx_ingress.pp | 2 +
 .../kubernetes_addons/manifests/tiller.pp | 2 +
 .../spec/classes/cluster_autoscaler_spec.rb | 7 +-
 .../spec/classes/dashboard_spec.rb | 5 +-
 .../spec/classes/default_backend_spec.rb | 5 +-
 .../spec/classes/elasticsearch_spec.rb | 5 +-
 .../classes/fluentd_elasticsearch_spec.rb | 5 +-
 .../spec/classes/grafana_spec.rb | 5 +-
 .../spec/classes/heapster_spec.rb | 5 +-
 .../spec/classes/influxdb_spec.rb | 5 +-
 .../spec/classes/kibana_spec.rb | 5 +-
 .../spec/classes/kube2iam_spec.rb | 5 +-
 .../spec/classes/metrics_server_spec.rb | 5 +-
 .../spec/classes/nginx_ingress_spec.rb | 5 +-
 .../spec/classes/tiller_spec.rb | 5 +-
 .../prometheus/manifests/blackbox_exporter.pp | 1 +
 .../manifests/blackbox_exporter_etcd.pp | 8 +-
 puppet/modules/prometheus/manifests/init.pp | 8 ++
 .../manifests/kube_state_metrics.pp | 1 +
 .../prometheus/manifests/node_exporter.pp | 72 ++++++-----
 puppet/modules/prometheus/manifests/rule.pp | 4 +-
 .../prometheus/manifests/scrape_config.pp | 4 +
 puppet/modules/prometheus/manifests/server.pp | 49 ++++----
 .../spec/defines/scrape_config_spec.rb | 1 +
 puppet/modules/tarmak/manifests/fluent_bit.pp | 1 -
 66 files changed, 466 insertions(+), 235 deletions(-)
 delete mode 100644 puppet/modules/kubernetes/manifests/delete.pp
 create mode 100644
puppet/modules/kubernetes/spec/classes/pod_security_policy_spec.rb diff --git a/Gopkg.lock b/Gopkg.lock index 92adfb299e..49a695274d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2821,6 +2821,7 @@ "github.com/terraform-providers/terraform-provider-template/template", "github.com/terraform-providers/terraform-provider-tls/tls", "golang.org/x/crypto/ssh", + "golang.org/x/crypto/ssh/knownhosts", "golang.org/x/net/context", "gopkg.in/src-d/go-git.v4", "gopkg.in/src-d/go-git.v4/config", diff --git a/pkg/puppet/puppet.go b/pkg/puppet/puppet.go index fb2bdc531f..98e7a4bc1d 100644 --- a/pkg/puppet/puppet.go +++ b/pkg/puppet/puppet.go @@ -174,13 +174,16 @@ func kubernetesClusterConfig(conf *clusterv1alpha1.ClusterKubernetes, hieraData } // enable prometheus if set, default: enabled + hieraData.classes = append(hieraData.classes, `prometheus`) if conf.Prometheus == nil || conf.Prometheus.Enabled { mode := clusterv1alpha1.PrometheusModeFull if conf.Prometheus != nil && conf.Prometheus.Mode != "" { mode = conf.Prometheus.Mode } hieraData.variables = append(hieraData.variables, fmt.Sprintf("prometheus::mode: %s", mode)) - hieraData.classes = append(hieraData.classes, `prometheus`) + hieraData.variables = append(hieraData.variables, `prometheus::ensure: "present"`) + } else { + hieraData.variables = append(hieraData.variables, `prometheus::ensure: "absent"`) } globalGates := make(map[string]bool) @@ -275,35 +278,40 @@ func kubernetesClusterConfigPerRole(conf *clusterv1alpha1.ClusterKubernetes, rol return } - if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.ClusterAutoscaler != nil && conf.ClusterAutoscaler.Enabled { + if roleName == clusterv1alpha1.KubernetesMasterRoleName { hieraData.classes = append(hieraData.classes, `kubernetes_addons::cluster_autoscaler`) - if conf.ClusterAutoscaler.Image != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::image: "%s"`, conf.ClusterAutoscaler.Image)) - } - if conf.ClusterAutoscaler.Version != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::version: "%s"`, conf.ClusterAutoscaler.Version)) - } + if conf.ClusterAutoscaler != nil && conf.ClusterAutoscaler.Enabled { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::ensure: "present"`) + if conf.ClusterAutoscaler.Image != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::image: "%s"`, conf.ClusterAutoscaler.Image)) + } + if conf.ClusterAutoscaler.Version != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::version: "%s"`, conf.ClusterAutoscaler.Version)) + } - if conf.ClusterAutoscaler.ScaleDownUtilizationThreshold != nil { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::scale_down_utilization_threshold: %v`, *conf.ClusterAutoscaler.ScaleDownUtilizationThreshold)) - } + if conf.ClusterAutoscaler.ScaleDownUtilizationThreshold != nil { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::scale_down_utilization_threshold: %v`, *conf.ClusterAutoscaler.ScaleDownUtilizationThreshold)) + } - if conf.ClusterAutoscaler.Overprovisioning != nil && conf.ClusterAutoscaler.Overprovisioning.Enabled { - hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::enable_overprovisioning: true`) - 
hieraData.variables = append(hieraData.variables, `kubernetes::enable_pod_priority: true`) + if conf.ClusterAutoscaler.Overprovisioning != nil && conf.ClusterAutoscaler.Overprovisioning.Enabled { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::enable_overprovisioning: true`) + hieraData.variables = append(hieraData.variables, `kubernetes::enable_pod_priority: true`) - if conf.ClusterAutoscaler.Overprovisioning.Image != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_image: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Image)) - } - if conf.ClusterAutoscaler.Overprovisioning.Version != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_version: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Version)) - } + if conf.ClusterAutoscaler.Overprovisioning.Image != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_image: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Image)) + } + if conf.ClusterAutoscaler.Overprovisioning.Version != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_version: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Version)) + } - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_millicores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica)) - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_megabytes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica)) - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::cores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.CoresPerReplica)) - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::nodes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.NodesPerReplica)) - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::replica_count: %d`, conf.ClusterAutoscaler.Overprovisioning.ReplicaCount)) + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_millicores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica)) + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_megabytes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica)) + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::cores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.CoresPerReplica)) + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::nodes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.NodesPerReplica)) + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::replica_count: %d`, conf.ClusterAutoscaler.Overprovisioning.ReplicaCount)) + } + } else { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::ensure: "absent"`) } } @@ -313,36 +321,52 @@ func kubernetesClusterConfigPerRole(conf 
*clusterv1alpha1.ClusterKubernetes, rol } } - if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.Tiller != nil && conf.Tiller.Enabled { + if roleName == clusterv1alpha1.KubernetesMasterRoleName { hieraData.classes = append(hieraData.classes, `kubernetes_addons::tiller`) - if conf.Tiller.Image != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::image: "%s"`, conf.Tiller.Image)) - } - if conf.Tiller.Version != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::version: "%s"`, conf.Tiller.Version)) + if conf.Tiller != nil && conf.Tiller.Enabled { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::tiller::ensure: "present"`) + if conf.Tiller.Image != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::image: "%s"`, conf.Tiller.Image)) + } + if conf.Tiller.Version != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::version: "%s"`, conf.Tiller.Version)) + } + } else { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::tiller::ensure: "absent"`) } } - if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.Dashboard != nil && conf.Dashboard.Enabled { + if roleName == clusterv1alpha1.KubernetesMasterRoleName { hieraData.classes = append(hieraData.classes, `kubernetes_addons::dashboard`) - if conf.Dashboard.Image != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::image: "%s"`, conf.Dashboard.Image)) - } - if conf.Dashboard.Version != "" { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::version: "%s"`, conf.Dashboard.Version)) + if conf.Dashboard != nil && conf.Dashboard.Enabled { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::dashboard::ensure: "present"`) + if conf.Dashboard.Image != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::image: "%s"`, conf.Dashboard.Image)) + } + if conf.Dashboard.Version != "" { + hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::version: "%s"`, conf.Dashboard.Version)) + } + } else { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::dashboard::ensure: "absent"`) } } - if g := conf.Grafana; g != nil { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::grafana::enabled: %t`, conf.Grafana.Enabled)) + if g := conf.Grafana; g != nil && conf.Grafana.Enabled { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::grafana::ensure: "present"`) + } else { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::grafana::ensure: "absent"`) } - if h := conf.Heapster; h != nil { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::heapster::enabled: %t`, conf.Heapster.Enabled)) + if h := conf.Heapster; h != nil && conf.Heapster.Enabled { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::heapster::ensure: "present"`) + } else { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::heapster::ensure: "absent"`) } - if i := conf.Heapster; i != nil { - hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::influxdb::enabled: %t`, conf.InfluxDB.Enabled)) + if i := conf.Heapster; i != nil && conf.InfluxDB.Enabled { + hieraData.variables = 
append(hieraData.variables, `kubernetes_addons::influxdb::ensure: "present"`) + } else { + hieraData.variables = append(hieraData.variables, `kubernetes_addons::influxdb::ensure: "absent"`) } return @@ -378,6 +402,9 @@ func (p *Puppet) contentClusterConfig(cluster interfaces.Cluster) ([]string, err return nil, fmt.Errorf("unable to marshall logging sinks: %s", err) } hieraData.variables = append(hieraData.variables, fmt.Sprintf(`tarmak::fluent_bit_configs: %s`, string(jsonLoggingSink))) + hieraData.variables = append(hieraData.variables, `fluent_bit::ensure: "present"`) + } else { + hieraData.variables = append(hieraData.variables, `fluent_bit::ensure: "absent"`) } if v := cluster.Config().VaultHelper; v != nil { diff --git a/puppet/modules/aws_es_proxy/manifests/instance.pp b/puppet/modules/aws_es_proxy/manifests/instance.pp index e98fc3985f..78cd250733 100644 --- a/puppet/modules/aws_es_proxy/manifests/instance.pp +++ b/puppet/modules/aws_es_proxy/manifests/instance.pp @@ -3,8 +3,7 @@ Boolean $tls = true, Integer $dest_port = 9200, Integer $listen_port = 9200, - Enum['running', 'stopped'] $ensure_service = 'running', - Boolean $enable_service = true + Enum['present', 'absent'] $ensure = 'present' ){ include ::aws_es_proxy @@ -27,6 +26,14 @@ } } + if $ensure == 'present' { + $service_ensure = 'running' + $service_enable = true + } else { + $service_ensure = 'stopped' + $service_enable = false + } + exec { "${service_name}-systemctl-daemon-reload": command => 'systemctl daemon-reload', refreshonly => true, @@ -34,7 +41,7 @@ } file{ "/etc/systemd/system/${service_name}.service": - ensure => 'file', + ensure => file, mode => '0644', owner => 'root', group => 'root', @@ -42,8 +49,8 @@ notify => Exec["${service_name}-systemctl-daemon-reload"] } ~> service { "${service_name}.service": - ensure => $ensure_service, - enable => $enable_service, + ensure => $service_ensure, + enable => $service_enable, hasstatus => true, hasrestart => true, subscribe => Class['aws_es_proxy::install'] diff --git a/puppet/modules/calico/manifests/config.pp b/puppet/modules/calico/manifests/config.pp index ff151556c1..0335c608bf 100644 --- a/puppet/modules/calico/manifests/config.pp +++ b/puppet/modules/calico/manifests/config.pp @@ -17,6 +17,7 @@ } kubernetes::apply{'calico-config': + ensure => 'present', manifests => [ template('calico/configmap_etcd.yaml.erb'), ], @@ -26,6 +27,7 @@ $pod_network = $::calico::pod_network kubernetes::apply{'calico-config': + ensure => 'present', manifests => [ template('calico/configmap_kubernetes.yaml.erb'), ], diff --git a/puppet/modules/calico/manifests/disable_source_destination_check.pp b/puppet/modules/calico/manifests/disable_source_destination_check.pp index 9a531c9b00..672e9df190 100644 --- a/puppet/modules/calico/manifests/disable_source_destination_check.pp +++ b/puppet/modules/calico/manifests/disable_source_destination_check.pp @@ -41,6 +41,7 @@ $aws_region = $::ec2_metadata['placement']['availability-zone'][0,-2] kubernetes::apply{'disable-srcdest-node': + ensure => 'present', manifests => [ template('calico/disable-source-destination.yaml.erb'), ], diff --git a/puppet/modules/calico/manifests/node.pp b/puppet/modules/calico/manifests/node.pp index f0a5f0397b..68cac86d98 100644 --- a/puppet/modules/calico/manifests/node.pp +++ b/puppet/modules/calico/manifests/node.pp @@ -60,6 +60,7 @@ } kubernetes::apply{'calico-node': + ensure => 'present', manifests => [ template('calico/node-rbac.yaml.erb'), template('calico/node-daemonset.yaml.erb'), diff --git 
a/puppet/modules/calico/manifests/policy_controller.pp b/puppet/modules/calico/manifests/policy_controller.pp index ebd03f75e9..23b69be73c 100644 --- a/puppet/modules/calico/manifests/policy_controller.pp +++ b/puppet/modules/calico/manifests/policy_controller.pp @@ -23,6 +23,7 @@ if $backend == 'etcd' { $namespace = $::calico::namespace + $ensure = 'present' if $::calico::etcd_proto == 'https' { $etcd_tls_dir = $::calico::etcd_tls_dir @@ -30,11 +31,14 @@ } else { $tls = false } + } else { + $ensure = 'absent' + } - kubernetes::apply{'calico-policy-controller': - manifests => [ - template('calico/policy-controller-deployment.yaml.erb'), - ], - } + kubernetes::apply{'calico-policy-controller': + ensure => $ensure, + manifests => [ + template('calico/policy-controller-deployment.yaml.erb'), + ], } } diff --git a/puppet/modules/calico/spec/classes/config_spec.rb b/puppet/modules/calico/spec/classes/config_spec.rb index 7dd05b2b11..16ddd3737e 100644 --- a/puppet/modules/calico/spec/classes/config_spec.rb +++ b/puppet/modules/calico/spec/classes/config_spec.rb @@ -8,9 +8,12 @@ " class kubernetes{} define kubernetes::apply( - $manifests, + Enum['present', 'absent'] $ensure = 'present', + $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } class{'calico': #{cloud_provider} diff --git a/puppet/modules/calico/spec/classes/init_spec.rb b/puppet/modules/calico/spec/classes/init_spec.rb index 37922827f4..bf4bc90330 100644 --- a/puppet/modules/calico/spec/classes/init_spec.rb +++ b/puppet/modules/calico/spec/classes/init_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.7.10' } define kubernetes::apply( - $manifests, + Enum['present', 'absent'] $ensure = 'present', + $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end @@ -55,9 +58,12 @@ class kubernetes{ class kubernetes::apiserver{} include kubernetes::apiserver define kubernetes::apply( - $manifests, + Enum['present', 'absent'] $ensure = 'present', + $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/calico/spec/classes/node_spec.rb b/puppet/modules/calico/spec/classes/node_spec.rb index 4375c7073c..f2df3a8217 100644 --- a/puppet/modules/calico/spec/classes/node_spec.rb +++ b/puppet/modules/calico/spec/classes/node_spec.rb @@ -21,9 +21,12 @@ class kubernetes{ $_apiserver_insecure_port = 1000 } define kubernetes::apply( - $manifests, + Enum['present', 'absent'] $ensure = 'present', + $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } class{'calico': #{mtu} diff --git a/puppet/modules/calico/spec/classes/policy_controller_spec.rb b/puppet/modules/calico/spec/classes/policy_controller_spec.rb index 021fb75c7b..4a0cbdd89b 100644 --- a/puppet/modules/calico/spec/classes/policy_controller_spec.rb +++ b/puppet/modules/calico/spec/classes/policy_controller_spec.rb @@ -16,9 +16,12 @@ class kubernetes{ $version = '#{kubernetes_version}' } define kubernetes::apply( - $manifests, + Enum['present', 'absent'] $ensure = 'present', + $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + 
kubernetes::addon_manager_labels($manifests[0]) + } } class{'calico': #{mtu} diff --git a/puppet/modules/fluent_bit/manifests/config.pp b/puppet/modules/fluent_bit/manifests/config.pp index 483b78d619..bc767fc5d7 100644 --- a/puppet/modules/fluent_bit/manifests/config.pp +++ b/puppet/modules/fluent_bit/manifests/config.pp @@ -8,7 +8,7 @@ } file { '/etc/td-agent-bit/td-agent-bit.conf': - ensure => file, + ensure => $::fluent_bit::ensure, mode => '0644', owner => 'root', group => 'root', diff --git a/puppet/modules/fluent_bit/manifests/daemonset.pp b/puppet/modules/fluent_bit/manifests/daemonset.pp index 311c7644e6..97d5ed8513 100644 --- a/puppet/modules/fluent_bit/manifests/daemonset.pp +++ b/puppet/modules/fluent_bit/manifests/daemonset.pp @@ -28,6 +28,7 @@ } kubernetes::apply{'fluent-bit': + ensure => $::fluent_bit::ensure, manifests => [ template('fluent_bit/fluent-bit-configmap.yaml.erb'), template('fluent_bit/fluent-bit-daemonset.yaml.erb'), diff --git a/puppet/modules/fluent_bit/manifests/init.pp b/puppet/modules/fluent_bit/manifests/init.pp index 71c3e945d8..798acd65ae 100644 --- a/puppet/modules/fluent_bit/manifests/init.pp +++ b/puppet/modules/fluent_bit/manifests/init.pp @@ -1,6 +1,7 @@ class fluent_bit ( $package_name = $::fluent_bit::params::package_name, $service_name = $::fluent_bit::params::service_name, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::fluent_bit::params { $path = defined('$::path') ? { diff --git a/puppet/modules/fluent_bit/manifests/output.pp b/puppet/modules/fluent_bit/manifests/output.pp index 2ea8324ff5..5e6f6e550b 100644 --- a/puppet/modules/fluent_bit/manifests/output.pp +++ b/puppet/modules/fluent_bit/manifests/output.pp @@ -19,6 +19,7 @@ if $elasticsearch and $elasticsearch['amazonESProxy'] { ::aws_es_proxy::instance{ $name: + ensure => $::fluent_bit::ensure, tls => $elasticsearch['tls'], dest_port => $elasticsearch['port'], dest_address => $elasticsearch['host'], @@ -26,14 +27,13 @@ } } else { ::aws_es_proxy::instance{ $name: - ensure_service => 'stopped', - enable_service => false, - dest_address => '', + ensure => 'absent', + dest_address => '', } } file { "/etc/td-agent-bit/td-agent-bit-output-${name}.conf": - ensure => file, + ensure => $::fluent_bit::ensure, mode => '0640', owner => 'root', group => 'root', @@ -42,11 +42,10 @@ } file { "/etc/td-agent-bit/daemonset/td-agent-bit-output-${name}.conf": - ensure => file, + ensure => $::fluent_bit::ensure, mode => '0640', owner => 'root', group => 'root', content => template('fluent_bit/td-agent-bit-output.conf.erb'), } - } diff --git a/puppet/modules/fluent_bit/manifests/service.pp b/puppet/modules/fluent_bit/manifests/service.pp index 8b46800721..389a60856c 100644 --- a/puppet/modules/fluent_bit/manifests/service.pp +++ b/puppet/modules/fluent_bit/manifests/service.pp @@ -1,8 +1,15 @@ class fluent_bit::service { + if $::fluent_bit::ensure == 'present' { + $service_ensure = 'running' + $service_enable = true + } else { + $service_ensure = 'stopped' + $service_enable = false + } service { $::fluent_bit::service_name: - ensure => running, - enable => true, + ensure => $service_ensure, + enable => $service_enable, hasstatus => true, hasrestart => true, } diff --git a/puppet/modules/fluent_bit/spec/classes/daemonset_spec.rb b/puppet/modules/fluent_bit/spec/classes/daemonset_spec.rb index 42ea8ec02d..5f0d79a0aa 100644 --- a/puppet/modules/fluent_bit/spec/classes/daemonset_spec.rb +++ b/puppet/modules/fluent_bit/spec/classes/daemonset_spec.rb @@ -1,9 +1,10 @@ require 'spec_helper' describe 
'fluent_bit::daemonset' do let(:pre_condition) do - [ - 'include kubernetes::apiserver' - ] + """ + class{'fluent_bit': ensure => 'present'} + class{'kubernetes::apiserver':} + """ end let :manifests_file do diff --git a/puppet/modules/fluent_bit/spec/classes/init_spec.rb b/puppet/modules/fluent_bit/spec/classes/init_spec.rb index fd052b2749..e35358f981 100644 --- a/puppet/modules/fluent_bit/spec/classes/init_spec.rb +++ b/puppet/modules/fluent_bit/spec/classes/init_spec.rb @@ -1,9 +1,21 @@ require 'spec_helper' describe 'fluent_bit' do + + let(:pre_condition) do + [ + 'include kubernetes::apiserver' + ] + end + let(:pre_condition) do + """ + class{'fluent_bit': ensure => 'present'} + """ + end + context 'with default values for all parameters' do it { should contain_class('fluent_bit') } end - + context 'on cloud_provider aws' do let(:params) { { diff --git a/puppet/modules/fluent_bit/spec/defines/output_spec.rb b/puppet/modules/fluent_bit/spec/defines/output_spec.rb index 356661d131..14fa339c02 100644 --- a/puppet/modules/fluent_bit/spec/defines/output_spec.rb +++ b/puppet/modules/fluent_bit/spec/defines/output_spec.rb @@ -2,6 +2,13 @@ describe 'fluent_bit::output', :type => :define do + let(:pre_condition) do + """ + class{'fluent_bit': ensure => 'present'} + class{'kubernetes::apiserver':} + """ + end + let(:config) { contain_file('/etc/td-agent-bit/td-agent-bit.conf') } diff --git a/puppet/modules/kubernetes/manifests/apply.pp b/puppet/modules/kubernetes/manifests/apply.pp index b53172cf75..04d438b5c6 100644 --- a/puppet/modules/kubernetes/manifests/apply.pp +++ b/puppet/modules/kubernetes/manifests/apply.pp @@ -4,6 +4,7 @@ $force = false, $format = 'yaml', Enum['manifests','concat'] $type = 'manifests', + Enum['present', 'absent'] $ensure = 'present', ){ require ::kubernetes require ::kubernetes::kubectl @@ -21,7 +22,7 @@ case $type { 'manifests': { file{$apply_file: - ensure => file, + ensure => $ensure, mode => '0640', owner => 'root', group => $kubernetes::group, @@ -31,7 +32,7 @@ } 'concat': { concat { $apply_file: - ensure => present, + ensure => $ensure, ensure_newline => true, mode => '0640', owner => 'root', @@ -54,7 +55,11 @@ $protocol = 'http' } - $command = "/bin/bash -c \"while true; do if [[ \$(curl -k -w '%{http_code}' -s -o /dev/null ${protocol}://localhost:${server_port}/healthz) == 200 ]]; then break; else sleep 2; fi; done; kubectl apply -f '${apply_file}' || rm -f '${apply_file})'\"" + if $ensure == 'present' { + $command = "/bin/bash -c \"while true; do if [[ \$(curl -k -w '%{http_code}' -s -o /dev/null ${protocol}://localhost:${server_port}/healthz) == 200 ]]; then break; else sleep 2; fi; done; kubectl apply -f '${apply_file}' || rm -f '${apply_file})'\"" + } else { + $command = '/bin/true' + } # validate file first exec{"validate_${name}": diff --git a/puppet/modules/kubernetes/manifests/apply_fragment.pp b/puppet/modules/kubernetes/manifests/apply_fragment.pp index a818b5d9f1..5bb7514920 100644 --- a/puppet/modules/kubernetes/manifests/apply_fragment.pp +++ b/puppet/modules/kubernetes/manifests/apply_fragment.pp @@ -4,6 +4,7 @@ $order, $target, $format = 'yaml', + Enum['present', 'absent'] $ensure = 'present', ){ require ::kubernetes require ::kubernetes::kubectl @@ -14,9 +15,11 @@ $apply_file = "${::kubernetes::apply_dir}/${target}.${format}" - concat::fragment { "kubectl-apply-${name}": - target => $apply_file, - content => $content, - order => $order, + if $ensure == 'present' { + concat::fragment { "kubectl-apply-${name}": + target => $apply_file, + 
content => $content, + order => $order, + } } } diff --git a/puppet/modules/kubernetes/manifests/delete.pp b/puppet/modules/kubernetes/manifests/delete.pp deleted file mode 100644 index 9198737b33..0000000000 --- a/puppet/modules/kubernetes/manifests/delete.pp +++ /dev/null @@ -1,12 +0,0 @@ -# deletes resources to a kubernetes master -define kubernetes::delete( - $format = 'yaml', -){ - require ::kubernetes - - $apply_file = "${::kubernetes::apply_dir}/${name}.${format}" - - file {$apply_file: - ensure => absent, - } -} diff --git a/puppet/modules/kubernetes/manifests/dns.pp b/puppet/modules/kubernetes/manifests/dns.pp index 489a6e47e3..64b779203b 100644 --- a/puppet/modules/kubernetes/manifests/dns.pp +++ b/puppet/modules/kubernetes/manifests/dns.pp @@ -52,6 +52,7 @@ } kubernetes::apply{$app_name: + ensure => 'present', manifests => concat( $manifests, template('kubernetes/dns-service-account.yaml.erb'), @@ -61,5 +62,8 @@ template('kubernetes/dns-cluster-role.yaml.erb'), template('kubernetes/dns-cluster-role-binding.yaml.erb'), ), - } -> kubernetes::delete{$delete_app_name:} + } -> kubernetes::apply{$delete_app_name: + ensure => 'absent', + manifests => [], + } } diff --git a/puppet/modules/kubernetes/manifests/pod_security_policy.pp b/puppet/modules/kubernetes/manifests/pod_security_policy.pp index 8ccd9b9738..48cde7bd4e 100644 --- a/puppet/modules/kubernetes/manifests/pod_security_policy.pp +++ b/puppet/modules/kubernetes/manifests/pod_security_policy.pp @@ -4,17 +4,22 @@ $pod_security_policy = $::kubernetes::_pod_security_policy if $pod_security_policy { + $ensure = 'present' $authorization_mode = $kubernetes::_authorization_mode if ! member($authorization_mode, 'RBAC') { fail('RBAC should be enabled when PodSecurityPolicy is enabled.') } - kubernetes::apply{'puppernetes-rbac-psp': - manifests => [ - template('kubernetes/pod-security-policy-rbac.yaml.erb'), - template('kubernetes/pod-security-policy.yaml.erb'), - ], - } + } else { + $ensure = 'absent' + } + + kubernetes::apply{'puppernetes-rbac-psp': + ensure => $ensure, + manifests => [ + template('kubernetes/pod-security-policy-rbac.yaml.erb'), + template('kubernetes/pod-security-policy.yaml.erb'), + ], } } diff --git a/puppet/modules/kubernetes/manifests/rbac.pp b/puppet/modules/kubernetes/manifests/rbac.pp index ee5b3df1ef..f2fa845038 100644 --- a/puppet/modules/kubernetes/manifests/rbac.pp +++ b/puppet/modules/kubernetes/manifests/rbac.pp @@ -4,14 +4,19 @@ $authorization_mode = $kubernetes::_authorization_mode if member($authorization_mode, 'RBAC') and versioncmp($::kubernetes::version, '1.6.0') < 0 { - kubernetes::apply{'puppernetes-rbac': - manifests => [ - template('kubernetes/rbac-namespace-kube-public.yaml.erb'), - template('kubernetes/rbac-cluster-roles.yaml.erb'), - template('kubernetes/rbac-cluster-role-bindings.yaml.erb'), - template('kubernetes/rbac-namespace-roles.yaml.erb'), - template('kubernetes/rbac-namespace-role-bindings.yaml.erb'), - ], - } + $ensure = 'present' + } else { + $ensure = 'absent' + } + + kubernetes::apply{'puppernetes-rbac': + ensure => $ensure, + manifests => [ + template('kubernetes/rbac-namespace-kube-public.yaml.erb'), + template('kubernetes/rbac-cluster-roles.yaml.erb'), + template('kubernetes/rbac-cluster-role-bindings.yaml.erb'), + template('kubernetes/rbac-namespace-roles.yaml.erb'), + template('kubernetes/rbac-namespace-role-bindings.yaml.erb'), + ], } } diff --git a/puppet/modules/kubernetes/manifests/storage_classes.pp b/puppet/modules/kubernetes/manifests/storage_classes.pp index 
ae6aa4d97e..a5587a9cef 100644 --- a/puppet/modules/kubernetes/manifests/storage_classes.pp +++ b/puppet/modules/kubernetes/manifests/storage_classes.pp @@ -4,6 +4,7 @@ if versioncmp($::kubernetes::version, '1.4.0') >= 0 { if $cloud_provider == 'aws' { kubernetes::apply{'storage-classes': + ensure => 'present', manifests => [ template('kubernetes/storage-classes-aws.yaml.erb'), ], diff --git a/puppet/modules/kubernetes/spec/classes/dns_spec.rb b/puppet/modules/kubernetes/spec/classes/dns_spec.rb index d1538b5bd0..498280c4c6 100644 --- a/puppet/modules/kubernetes/spec/classes/dns_spec.rb +++ b/puppet/modules/kubernetes/spec/classes/dns_spec.rb @@ -6,11 +6,13 @@ " class{'kubernetes': version => '1.9.0'} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } - define kubernetes::delete(){} " end @@ -37,11 +39,13 @@ " class{'kubernetes': version => '1.11.0'} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } - define kubernetes::delete(){} " end diff --git a/puppet/modules/kubernetes/spec/classes/pod_security_policy_spec.rb b/puppet/modules/kubernetes/spec/classes/pod_security_policy_spec.rb new file mode 100644 index 0000000000..5e402e9f06 --- /dev/null +++ b/puppet/modules/kubernetes/spec/classes/pod_security_policy_spec.rb @@ -0,0 +1,31 @@ +require 'spec_helper' + +describe 'kubernetes::pod_security_policy' do + context 'with default values for all parameters' do + let(:pre_condition) do + " + class{'kubernetes': version => '1.9.0'} + define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', + $manifests, + ){ + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } + } + " + end + + let(:manifests) do + catalogue.resource('Kubernetes::Apply', 'puppernetes-rbac-psp').send(:parameters)[:manifests] + end + + it { should contain_class('kubernetes::pod_security_policy') } + + it 'be valid yaml' do + manifests.each do |manifest| + YAML.parse manifest + end + end + end +end diff --git a/puppet/modules/kubernetes/spec/classes/rbac_spec.rb b/puppet/modules/kubernetes/spec/classes/rbac_spec.rb index a8a81c2324..27330f0be0 100644 --- a/puppet/modules/kubernetes/spec/classes/rbac_spec.rb +++ b/puppet/modules/kubernetes/spec/classes/rbac_spec.rb @@ -13,7 +13,7 @@ class{'kubernetes::master':} """ ]} - it { should_not contain_file(crb_system_node_file) } + it { should contain_file(crb_system_node_file).with_ensure('absent') } end context 'enabled in 1.6' do @@ -23,7 +23,7 @@ class{'kubernetes::master':} """ ]} - it { should_not contain_file(crb_system_node_file) } + it { should contain_file(crb_system_node_file).with_ensure('absent') } end context 'enabled in 1.5' do @@ -33,8 +33,8 @@ class{'kubernetes::master':} """ ]} - it { should contain_file(crb_system_node_file).with_content(%r{cluster-admin}) } - it { should contain_file(crb_system_node_file).with_content(%r{system:node}) } + it { should contain_file(crb_system_node_file).with_content(%r{cluster-admin}).with_ensure('present') } + it { should contain_file(crb_system_node_file).with_content(%r{system:node}).with_ensure('present') } end end end diff --git a/puppet/modules/kubernetes/spec/defines/apply_spec.rb 
b/puppet/modules/kubernetes/spec/defines/apply_spec.rb index 7073536144..e05df7ba04 100644 --- a/puppet/modules/kubernetes/spec/defines/apply_spec.rb +++ b/puppet/modules/kubernetes/spec/defines/apply_spec.rb @@ -11,6 +11,9 @@ ]} context 'not running on kubernetes master' do + let :params do + { :ensure => 'present'} + end let(:pre_condition) {[]} it { should compile.and_raise_error(/only be used on the kubernetes master/) } end @@ -20,6 +23,7 @@ let :params do { :type => 'manifests', + :ensure => 'present', } end it do @@ -31,6 +35,7 @@ let :params do { :type => 'concat', + :ensure => 'present', } end it do @@ -46,6 +51,11 @@ include kubernetes::apiserver ' ]} + let :params do + { + :ensure => 'present', + } + end it do should contain_file("/etc/kubernetes/apply/#{title}.yaml") diff --git a/puppet/modules/kubernetes_addons/manifests/cluster_autoscaler.pp b/puppet/modules/kubernetes_addons/manifests/cluster_autoscaler.pp index 04243c6624..30b9211bd6 100644 --- a/puppet/modules/kubernetes_addons/manifests/cluster_autoscaler.pp +++ b/puppet/modules/kubernetes_addons/manifests/cluster_autoscaler.pp @@ -20,6 +20,7 @@ $ca_mounts=$::kubernetes_addons::params::ca_mounts, $cloud_provider=$::kubernetes_addons::params::cloud_provider, $aws_region=$::kubernetes_addons::params::aws_region, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes @@ -96,15 +97,21 @@ if $_enable_overprovisioning and versioncmp($::kubernetes::version, '1.9.0') >= 0 { - kubernetes::apply{'cluster-autoscaler-overprovisioning': - manifests => [ - template('kubernetes_addons/cluster-autoscaler-overprovisioning.yaml.erb'), - template('kubernetes_addons/cluster-autoscaler-overprovisioning-rbac.yaml.erb'), - ], - } + $overprovision_ensure = $ensure + } else { + $overprovision_ensure = 'absent' + } + + kubernetes::apply{'cluster-autoscaler-overprovisioning': + ensure => $overprovision_ensure, + manifests => [ + template('kubernetes_addons/cluster-autoscaler-overprovisioning.yaml.erb'), + template('kubernetes_addons/cluster-autoscaler-overprovisioning-rbac.yaml.erb'), + ], } kubernetes::apply{'cluster-autoscaler': + ensure => $ensure, manifests => [ template('kubernetes_addons/cluster-autoscaler-deployment.yaml.erb'), template('kubernetes_addons/cluster-autoscaler-rbac.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/dashboard.pp b/puppet/modules/kubernetes_addons/manifests/dashboard.pp index 22ac0a4e71..e7e246b32f 100644 --- a/puppet/modules/kubernetes_addons/manifests/dashboard.pp +++ b/puppet/modules/kubernetes_addons/manifests/dashboard.pp @@ -6,6 +6,7 @@ String $request_cpu='10m', String $request_mem='64Mi', $replicas=undef, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes @@ -63,6 +64,7 @@ } kubernetes::apply{'kube-dashboard': + ensure => $ensure, manifests => [ template('kubernetes_addons/dashboard-deployment.yaml.erb'), template('kubernetes_addons/dashboard-rbac.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/default_backend.pp b/puppet/modules/kubernetes_addons/manifests/default_backend.pp index a366915b5f..9ab5ca78dc 100644 --- a/puppet/modules/kubernetes_addons/manifests/default_backend.pp +++ b/puppet/modules/kubernetes_addons/manifests/default_backend.pp @@ -7,10 +7,12 @@ $limit_mem=$::kubernetes_addons::params::default_backend_limit_mem, $namespace=$::kubernetes_addons::params::namespace, $replicas=undef, + $ensure = 'present', ) inherits 
::kubernetes_addons::params { require ::kubernetes kubernetes::apply{'default-backend': + ensure => $ensure, manifests => [ template('kubernetes_addons/default-backend-svc.yaml.erb'), template('kubernetes_addons/default-backend-deployment.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/elasticsearch.pp b/puppet/modules/kubernetes_addons/manifests/elasticsearch.pp index 535bb145db..086d96b299 100644 --- a/puppet/modules/kubernetes_addons/manifests/elasticsearch.pp +++ b/puppet/modules/kubernetes_addons/manifests/elasticsearch.pp @@ -11,11 +11,13 @@ String $limit_mem='2048Mi', Integer[0,65535] $node_port=0, Integer $replicas=2, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes # TODO: Support elasticsearch using StatefulSet pods kubernetes::apply{'elasticsearch': + ensure => $ensure, manifests => [ template('kubernetes_addons/elasticsearch-svc.yaml.erb'), template('kubernetes_addons/elasticsearch-deployment.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/fluentd_elasticsearch.pp b/puppet/modules/kubernetes_addons/manifests/fluentd_elasticsearch.pp index c4cd3fc1df..b8c84ff91d 100644 --- a/puppet/modules/kubernetes_addons/manifests/fluentd_elasticsearch.pp +++ b/puppet/modules/kubernetes_addons/manifests/fluentd_elasticsearch.pp @@ -6,10 +6,12 @@ String $request_mem='384Mi', String $limit_cpu='100m', String $limit_mem='256Mi', + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes kubernetes::apply{'fluentd-elasticsearch': + ensure => $ensure, manifests => [ template('kubernetes_addons/fluentd-elasticsearch-daemonset.yaml.erb'), ], diff --git a/puppet/modules/kubernetes_addons/manifests/grafana.pp b/puppet/modules/kubernetes_addons/manifests/grafana.pp index cc3d58a10b..098bc23dcc 100644 --- a/puppet/modules/kubernetes_addons/manifests/grafana.pp +++ b/puppet/modules/kubernetes_addons/manifests/grafana.pp @@ -1,18 +1,15 @@ class kubernetes_addons::grafana( $image=$::kubernetes_addons::params::grafana_image, $version=$::kubernetes_addons::params::grafana_version, - $enabled = true, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes - if $enabled { - kubernetes::apply{'heapster-grafana': - manifests => [ - template('kubernetes_addons/grafana-svc.yaml.erb'), - template('kubernetes_addons/grafana-deployment.yaml.erb'), - ], - } - } else { - kubernetes::delete{'heapster-grafana':} + kubernetes::apply{'heapster-grafana': + ensure => $ensure, + manifests => [ + template('kubernetes_addons/grafana-svc.yaml.erb'), + template('kubernetes_addons/grafana-deployment.yaml.erb'), + ], } } diff --git a/puppet/modules/kubernetes_addons/manifests/heapster.pp b/puppet/modules/kubernetes_addons/manifests/heapster.pp index 14a79c5305..46e31a5b11 100644 --- a/puppet/modules/kubernetes_addons/manifests/heapster.pp +++ b/puppet/modules/kubernetes_addons/manifests/heapster.pp @@ -11,7 +11,7 @@ $nanny_request_mem=$::kubernetes_addons::params::heapster_nanny_request_mem, $nanny_limit_cpu=$::kubernetes_addons::params::heapster_nanny_limit_cpu, $nanny_limit_mem=$::kubernetes_addons::params::heapster_nanny_limit_mem, - $enabled = true, + Enum['present', 'absent'] $ensure = 'present', $sink=undef, ) inherits ::kubernetes_addons::params { require ::kubernetes @@ -41,15 +41,12 @@ $version_before_1_9 = true } - if $enabled { - kubernetes::apply{'heapster': - manifests => [ - 
template('kubernetes_addons/heapster-svc.yaml.erb'), - template('kubernetes_addons/heapster-deployment.yaml.erb'), - template('kubernetes_addons/heapster-rbac.yaml.erb'), - ], - } - } else { - kubernetes::delete{'heapster':} + kubernetes::apply{'heapster': + ensure => $ensure, + manifests => [ + template('kubernetes_addons/heapster-svc.yaml.erb'), + template('kubernetes_addons/heapster-deployment.yaml.erb'), + template('kubernetes_addons/heapster-rbac.yaml.erb'), + ], } } diff --git a/puppet/modules/kubernetes_addons/manifests/influxdb.pp b/puppet/modules/kubernetes_addons/manifests/influxdb.pp index 57cb0690d8..c05e758017 100644 --- a/puppet/modules/kubernetes_addons/manifests/influxdb.pp +++ b/puppet/modules/kubernetes_addons/manifests/influxdb.pp @@ -1,18 +1,15 @@ class kubernetes_addons::influxdb( $image=$::kubernetes_addons::params::influxdb_image, $version=$::kubernetes_addons::params::influxdb_version, - $enabled = true, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes - if $enabled { - kubernetes::apply{'heapster-influxdb': - manifests => [ - template('kubernetes_addons/influxdb-svc.yaml.erb'), - template('kubernetes_addons/influxdb-deployment.yaml.erb'), - ], - } - } else { - kubernetes::delete{'heapster-influxdb':} + kubernetes::apply{'heapster-influxdb': + ensure => $ensure, + manifests => [ + template('kubernetes_addons/influxdb-svc.yaml.erb'), + template('kubernetes_addons/influxdb-deployment.yaml.erb'), + ], } } diff --git a/puppet/modules/kubernetes_addons/manifests/kibana.pp b/puppet/modules/kubernetes_addons/manifests/kibana.pp index 07fe384ca8..9ff73fddf3 100644 --- a/puppet/modules/kubernetes_addons/manifests/kibana.pp +++ b/puppet/modules/kubernetes_addons/manifests/kibana.pp @@ -7,10 +7,12 @@ String $limit_cpu='1', String $limit_mem='2Gi', Integer $replicas=2, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes kubernetes::apply{'kibana': + ensure => $ensure, manifests => [ template('kubernetes_addons/kibana-svc.yaml.erb'), template('kubernetes_addons/kibana-deployment.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/kube2iam.pp b/puppet/modules/kubernetes_addons/manifests/kube2iam.pp index 299d60cbea..1fd8b222c3 100644 --- a/puppet/modules/kubernetes_addons/manifests/kube2iam.pp +++ b/puppet/modules/kubernetes_addons/manifests/kube2iam.pp @@ -7,6 +7,7 @@ String $request_mem='64Mi', String $limit_cpu='', String $limit_mem='256Mi', + Enum['present', 'absent'] $ensure = 'present', ) { require ::kubernetes @@ -24,6 +25,7 @@ } kubernetes::apply{'kube2iam': + ensure => $ensure, manifests => [ template('kubernetes_addons/kube2iam-daemonset.yaml.erb'), template('kubernetes_addons/kube2iam-rbac.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/metrics_server.pp b/puppet/modules/kubernetes_addons/manifests/metrics_server.pp index 559e03d579..8bc4c585cb 100644 --- a/puppet/modules/kubernetes_addons/manifests/metrics_server.pp +++ b/puppet/modules/kubernetes_addons/manifests/metrics_server.pp @@ -44,11 +44,16 @@ } if versioncmp($::kubernetes::version, '1.7.0') >= 0 { - kubernetes::apply{'metrics-server': - manifests => [ - template('kubernetes_addons/metrics-server.yaml.erb'), - template('kubernetes_addons/metrics-server-rbac.yaml.erb'), - ], - } + $ensure = 'present' + } else { + $ensure = 'absent' + } + + kubernetes::apply{'metrics-server': + ensure => $ensure, + manifests => [ + 
template('kubernetes_addons/metrics-server.yaml.erb'), + template('kubernetes_addons/metrics-server-rbac.yaml.erb'), + ], } } diff --git a/puppet/modules/kubernetes_addons/manifests/nginx_ingress.pp b/puppet/modules/kubernetes_addons/manifests/nginx_ingress.pp index 0ef374172b..4168228481 100644 --- a/puppet/modules/kubernetes_addons/manifests/nginx_ingress.pp +++ b/puppet/modules/kubernetes_addons/manifests/nginx_ingress.pp @@ -8,6 +8,7 @@ $namespace=$::kubernetes_addons::params::namespace, $replicas=undef, $host_port=false, + Enum['present', 'absent'] $ensure = 'present', ) inherits ::kubernetes_addons::params { require ::kubernetes require ::kubernetes_addons::default_backend @@ -28,6 +29,7 @@ } kubernetes::apply{'nginx-ingress': + ensure => $ensure, manifests => [ template('kubernetes_addons/nginx-ingress-svc.yaml.erb'), template('kubernetes_addons/nginx-ingress-deployment.yaml.erb'), diff --git a/puppet/modules/kubernetes_addons/manifests/tiller.pp b/puppet/modules/kubernetes_addons/manifests/tiller.pp index 8ccd67ff46..6de2e739ae 100644 --- a/puppet/modules/kubernetes_addons/manifests/tiller.pp +++ b/puppet/modules/kubernetes_addons/manifests/tiller.pp @@ -2,6 +2,7 @@ String $image='gcr.io/kubernetes-helm/tiller', String $version='2.9.1', String $namespace='kube-system', + Enum['present', 'absent'] $ensure = 'present' ) inherits ::kubernetes_addons::params { require ::kubernetes @@ -31,6 +32,7 @@ } kubernetes::apply{'tiller': + ensure => $ensure, manifests => [ template('kubernetes_addons/tiller-deployment.yaml.erb'), ], diff --git a/puppet/modules/kubernetes_addons/spec/classes/cluster_autoscaler_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/cluster_autoscaler_spec.rb index c5f6cd3295..7a97d4ed81 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/cluster_autoscaler_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/cluster_autoscaler_spec.rb @@ -31,9 +31,12 @@ class kubernetes{ $cluster_name = 'cluster1' } define kubernetes::apply( - $manifests, + Enum['present', 'absent'] $ensure = 'present', + $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/dashboard_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/dashboard_spec.rb index 55be96c120..bba43ed7b8 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/dashboard_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/dashboard_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.6.4' } define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/default_backend_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/default_backend_spec.rb index be24ebd2f2..3ccb9f1f65 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/default_backend_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/default_backend_spec.rb @@ -4,9 +4,12 @@ " class kubernetes{} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git 
a/puppet/modules/kubernetes_addons/spec/classes/elasticsearch_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/elasticsearch_spec.rb index 8fa7eb9282..62d2a358fa 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/elasticsearch_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/elasticsearch_spec.rb @@ -4,9 +4,12 @@ " class kubernetes{} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/fluentd_elasticsearch_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/fluentd_elasticsearch_spec.rb index 6492d6a234..d64b86cb62 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/fluentd_elasticsearch_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/fluentd_elasticsearch_spec.rb @@ -4,9 +4,12 @@ " class kubernetes{} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/grafana_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/grafana_spec.rb index 5af5e20ecc..320ccde4e3 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/grafana_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/grafana_spec.rb @@ -4,9 +4,12 @@ " class kubernetes{} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/heapster_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/heapster_spec.rb index 0ef50b4305..3158e72642 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/heapster_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/heapster_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.6.4' } define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/influxdb_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/influxdb_spec.rb index b6e7f78f54..8a5784cafb 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/influxdb_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/influxdb_spec.rb @@ -4,9 +4,12 @@ " class kubernetes{} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/kibana_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/kibana_spec.rb index 1ab9ac0284..065c43d233 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/kibana_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/kibana_spec.rb @@ -4,9 +4,12 @@ " class kubernetes{} define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if 
$manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/kube2iam_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/kube2iam_spec.rb index 121265e34f..45cddfe577 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/kube2iam_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/kube2iam_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.6.4' } define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/metrics_server_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/metrics_server_spec.rb index 9884ff7ab8..fe49f0b3a7 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/metrics_server_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/metrics_server_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.8.0' } define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/nginx_ingress_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/nginx_ingress_spec.rb index 6b978a3f10..5385e6abe5 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/nginx_ingress_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/nginx_ingress_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.6.4' } define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/kubernetes_addons/spec/classes/tiller_spec.rb b/puppet/modules/kubernetes_addons/spec/classes/tiller_spec.rb index bdb80dcc87..6cc3431356 100644 --- a/puppet/modules/kubernetes_addons/spec/classes/tiller_spec.rb +++ b/puppet/modules/kubernetes_addons/spec/classes/tiller_spec.rb @@ -7,9 +7,12 @@ class kubernetes{ $version = '1.10.6' } define kubernetes::apply( + Enum['present', 'absent'] $ensure = 'present', $manifests, ){ - kubernetes::addon_manager_labels($manifests[0]) + if $manifests and $ensure == 'present' { + kubernetes::addon_manager_labels($manifests[0]) + } } " end diff --git a/puppet/modules/prometheus/manifests/blackbox_exporter.pp b/puppet/modules/prometheus/manifests/blackbox_exporter.pp index 2cf60ac61a..e17b2cd211 100644 --- a/puppet/modules/prometheus/manifests/blackbox_exporter.pp +++ b/puppet/modules/prometheus/manifests/blackbox_exporter.pp @@ -11,6 +11,7 @@ # Setup deployment for blackbox exporter in cluster kubernetes::apply{'blackbox-exporter': + ensure => $::prometheus::ensure, manifests => [ template('prometheus/prometheus-ns.yaml.erb'), template('prometheus/blackbox-exporter-deployment.yaml.erb'), diff --git a/puppet/modules/prometheus/manifests/blackbox_exporter_etcd.pp b/puppet/modules/prometheus/manifests/blackbox_exporter_etcd.pp index e97057b09a..f5f8bf86f6 100644 --- a/puppet/modules/prometheus/manifests/blackbox_exporter_etcd.pp +++ b/puppet/modules/prometheus/manifests/blackbox_exporter_etcd.pp @@ -109,12 +109,12 @@ mode => '0755', } -> file { 
"${config_dir}/blackbox_exporter.yaml": - ensure => file, + ensure => $::prometheus::ensure, content => template('prometheus/blackbox_exporter.yaml.erb'), } file { "${systemd_path}/blackbox-exporter.service": - ensure => file, + ensure => $::prometheus::ensure, content => template('prometheus/blackbox_exporter.service.erb'), } ~> exec { "${module_name}-systemctl-daemon-reload": @@ -123,8 +123,8 @@ } service { 'blackbox-exporter': - ensure => running, - enable => true, + ensure => $::prometheus::service_ensure, + enable => $::prometheus::service_enable, subscribe => [ Archive["${dest_dir}/blackbox_exporter"], File["${config_dir}/blackbox_exporter.yaml"], diff --git a/puppet/modules/prometheus/manifests/init.pp b/puppet/modules/prometheus/manifests/init.pp index 67f6132d5c..236957d9bc 100644 --- a/puppet/modules/prometheus/manifests/init.pp +++ b/puppet/modules/prometheus/manifests/init.pp @@ -7,8 +7,16 @@ Optional[Integer[1025,65535]] $etcd_k8s_events_port = $::prometheus::params::etcd_k8s_events_port, Optional[Integer[1024,65535]] $etcd_overlay_port = $::prometheus::params::etcd_overlay_port, String $mode = 'Full', + Enum['present', 'absent'] $ensure = 'present', ) inherits ::prometheus::params { + if $ensure == 'present' { + $service_ensure = 'running' + $service_enable = true + } else { + $service_ensure = 'stopped' + $service_enable = false + } if $role == 'master' { if $mode == 'Full' { diff --git a/puppet/modules/prometheus/manifests/kube_state_metrics.pp b/puppet/modules/prometheus/manifests/kube_state_metrics.pp index 8d94e25905..4c49916484 100644 --- a/puppet/modules/prometheus/manifests/kube_state_metrics.pp +++ b/puppet/modules/prometheus/manifests/kube_state_metrics.pp @@ -23,6 +23,7 @@ } kubernetes::apply{'kube-state-metrics': + ensure => $::prometheus::ensure, manifests => [ template('prometheus/prometheus-ns.yaml.erb'), template('prometheus/kube-state-metrics-deployment.yaml.erb'), diff --git a/puppet/modules/prometheus/manifests/node_exporter.pp b/puppet/modules/prometheus/manifests/node_exporter.pp index e72f4acb6c..62e8119894 100644 --- a/puppet/modules/prometheus/manifests/node_exporter.pp +++ b/puppet/modules/prometheus/manifests/node_exporter.pp @@ -73,38 +73,44 @@ } if $::prometheus::mode == 'Full' { - kubernetes::apply{'node-exporter': - manifests => [ - template('prometheus/prometheus-ns.yaml.erb'), - template('prometheus/node-exporter-ds.yaml.erb'), - ], - } + $node_ensure = $::prometheus::ensure + } else { + $node_ensure = 'absent' + } + + kubernetes::apply{'node-exporter': + ensure => $node_ensure, + manifests => [ + template('prometheus/prometheus-ns.yaml.erb'), + template('prometheus/node-exporter-ds.yaml.erb'), + ], + } - # scrape node exporter running on every kubernetes node (through api proxy) - prometheus::scrape_config { 'kubernetes-nodes-exporter': - order => 130, - config => { - 'kubernetes_sd_configs' => [{ - 'role' => 'node', + # scrape node exporter running on every kubernetes node (through api proxy) + prometheus::scrape_config { 'kubernetes-nodes-exporter': + ensure => $node_ensure, + order => 130, + config => { + 'kubernetes_sd_configs' => [{ + 'role' => 'node', + }], + 'tls_config' => { + 'ca_file' => $kubernetes_ca_file, + }, + 'bearer_token_file' => $kubernetes_token_file, + 'scheme' => 'https', + 'relabel_configs' => [{ + 'action' => 'labelmap', + 'regex' => '__meta_kubernetes_node_label_(.+)', + },{ + 'target_label' => '__address__', + 'replacement' => 'kubernetes.default.svc:443', + }, { + 'source_labels' => ['__meta_kubernetes_node_name'], + 
+        'regex' => '(.+)',
+        'target_label' => '__metrics_path__',
+        'replacement' => "/api/v1/nodes/\${1}:${port}/proxy/metrics",
+      }],
-        'tls_config' => {
-          'ca_file' => $kubernetes_ca_file,
-        },
-        'bearer_token_file' => $kubernetes_token_file,
-        'scheme' => 'https',
-        'relabel_configs' => [{
-          'action' => 'labelmap',
-          'regex' => '__meta_kubernetes_node_label_(.+)',
-        },{
-          'target_label' => '__address__',
-          'replacement' => 'kubernetes.default.svc:443',
-        }, {
-          'source_labels' => ['__meta_kubernetes_node_name'],
-          'regex' => '(.+)',
-          'target_label' => '__metrics_path__',
-          'replacement' => "/api/v1/nodes/\${1}:${port}/proxy/metrics",
-        }],
-      }
     }
   }
 }
@@ -131,13 +137,13 @@
     provider => 'airworthy',
   }
   -> file { "${::prometheus::systemd_path}/node-exporter.service":
-    ensure  => file,
+    ensure  => $prometheus::ensure,
     content => template('prometheus/node-exporter.service.erb'),
     notify  => Exec["${module_name}-systemctl-daemon-reload"],
   }
   ~> service { 'node-exporter.service':
-    ensure  => running,
-    enable  => true,
+    ensure  => $::prometheus::service_ensure,
+    enable  => $::prometheus::service_enable,
     require => Exec["${module_name}-systemctl-daemon-reload"],
   }
 }
diff --git a/puppet/modules/prometheus/manifests/rule.pp b/puppet/modules/prometheus/manifests/rule.pp
index 8e7c4b3827..e56a2d5c0e 100644
--- a/puppet/modules/prometheus/manifests/rule.pp
+++ b/puppet/modules/prometheus/manifests/rule.pp
@@ -6,6 +6,8 @@
   $labels = {'severity' => 'page'},
   Integer $order = 10,
 ) {
+  include ::prometheus
+
   if ! defined(Class['kubernetes::apiserver']) {
     fail('This defined type can only be used on the kubernetes master')
   }
@@ -28,8 +30,8 @@
     }]
   }
-
   kubernetes::apply_fragment { "prometheus-rules-${title}":
+    ensure  => $::prometheus::ensure,
     content => template('prometheus/prometheus-rule.yaml.erb'),
     order   => $order,
     target  => 'prometheus-rules',
diff --git a/puppet/modules/prometheus/manifests/scrape_config.pp b/puppet/modules/prometheus/manifests/scrape_config.pp
index c8d18d8273..84580f328e 100644
--- a/puppet/modules/prometheus/manifests/scrape_config.pp
+++ b/puppet/modules/prometheus/manifests/scrape_config.pp
@@ -2,12 +2,16 @@
   Integer $order,
   $config = {},
   $job_name = $title,
+  $ensure = $::prometheus::ensure,
 ) {
+  include ::prometheus
+
   if ! defined(Class['kubernetes::apiserver']) {
     fail('This defined type can only be used on the kubernetes master')
   }
 
   kubernetes::apply_fragment { "prometheus-scrape-config-${job_name}":
+    ensure  => $ensure,
     content => template('prometheus/prometheus-config-frag.yaml.erb'),
     order   => 400 + $order,
     target  => 'prometheus-config',
diff --git a/puppet/modules/prometheus/manifests/server.pp b/puppet/modules/prometheus/manifests/server.pp
index 962a90dadb..bad832e017 100644
--- a/puppet/modules/prometheus/manifests/server.pp
+++ b/puppet/modules/prometheus/manifests/server.pp
@@ -33,6 +33,7 @@
   }
 
   kubernetes::apply{'prometheus-server':
+    ensure    => $::prometheus::ensure,
     manifests => [
       template('prometheus/prometheus-ns.yaml.erb'),
       template('prometheus/prometheus-deployment.yaml.erb'),
@@ -41,37 +42,43 @@
   }
 
   kubernetes::apply{'prometheus-config':
-    type => 'concat',
+    ensure => $::prometheus::ensure,
+    type   => 'concat',
   }
 
   kubernetes::apply_fragment { 'prometheus-config-header':
-    content => template('prometheus/prometheus-config-header.yaml.erb'),
-    order => 0,
-    target => 'prometheus-config',
+    ensure  => $::prometheus::ensure,
+    content => template('prometheus/prometheus-config-header.yaml.erb'),
+    order   => 0,
+    target  => 'prometheus-config',
   }
 
   kubernetes::apply_fragment { 'prometheus-config-prometheus-file':
-    content => ' prometheus.yaml: |-',
-    order => 100,
-    target => 'prometheus-config',
+    ensure  => $::prometheus::ensure,
+    content => ' prometheus.yaml: |-',
+    order   => 100,
+    target  => 'prometheus-config',
   }
 
   kubernetes::apply_fragment { 'prometheus-config-prometheus-rules':
-    content => template('prometheus/prometheus-config-rules.yaml.erb'),
-    order => 200,
-    target => 'prometheus-config',
+    ensure  => $::prometheus::ensure,
+    content => template('prometheus/prometheus-config-rules.yaml.erb'),
+    order   => 200,
+    target  => 'prometheus-config',
   }
 
   kubernetes::apply_fragment { 'prometheus-config-global':
-    content => template('prometheus/prometheus-config-global.yaml.erb'),
-    order => 300,
-    target => 'prometheus-config',
+    ensure  => $::prometheus::ensure,
+    content => template('prometheus/prometheus-config-global.yaml.erb'),
+    order   => 300,
+    target  => 'prometheus-config',
   }
 
   kubernetes::apply_fragment { 'prometheus-config-global-pre-scrape-config':
-    content => ' scrape_configs:',
-    order => 400,
-    target => 'prometheus-config',
+    ensure  => $::prometheus::ensure,
+    content => ' scrape_configs:',
+    order   => 400,
+    target  => 'prometheus-config',
   }
 
   if $::prometheus::mode == 'Full' {
@@ -457,13 +464,15 @@
   }
 
   kubernetes::apply{'prometheus-rules':
-    type => 'concat',
+    ensure => $::prometheus::ensure,
+    type   => 'concat',
   }
 
   kubernetes::apply_fragment { 'prometheus-rules-header':
-    content => template('prometheus/prometheus-rules-header.yaml.erb'),
-    order => 0,
-    target => 'prometheus-rules',
+    ensure  => $::prometheus::ensure,
+    content => template('prometheus/prometheus-rules-header.yaml.erb'),
+    order   => 0,
+    target  => 'prometheus-rules',
   }
diff --git a/puppet/modules/prometheus/spec/defines/scrape_config_spec.rb b/puppet/modules/prometheus/spec/defines/scrape_config_spec.rb
index 34dda164d4..a360bc4862 100644
--- a/puppet/modules/prometheus/spec/defines/scrape_config_spec.rb
+++ b/puppet/modules/prometheus/spec/defines/scrape_config_spec.rb
@@ -3,6 +3,7 @@
 describe 'prometheus::scrape_config', :type => :define do
   let(:pre_condition) {[
     'include kubernetes::apiserver',
+    'include prometheus',
   ]}
 
   context 'test scrape static_configs definition' do
diff --git a/puppet/modules/tarmak/manifests/fluent_bit.pp b/puppet/modules/tarmak/manifests/fluent_bit.pp
index 7c15813762..60cd1c0eb8 100644
--- a/puppet/modules/tarmak/manifests/fluent_bit.pp
+++ b/puppet/modules/tarmak/manifests/fluent_bit.pp
@@ -7,5 +7,4 @@
       config => $fluent_bit_config,
     }
   }
-
 }
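
As a usage sketch only (an assumption, not taken from the patch above): the new
`ensure` parameter on the prometheus class would normally be driven from node
classification, and disabling the whole stack on a node could look roughly like
this, relying on the defaults from prometheus::params for everything else:

    # Hypothetical node manifest. ensure => 'absent' makes init.pp derive
    # $service_ensure = 'stopped' and $service_enable = false, and the
    # kubernetes::apply / kubernetes::apply_fragment resources in this module
    # pick the value up, so their Kubernetes objects are removed rather than
    # applied.
    class { '::prometheus':
      ensure => 'absent',
      mode   => 'Full',
    }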