Merge pull request #752 from JoshVanL/ensure-puppet-manifests
Ensure Kubernetes resources are absent when service is disabled
jetstack-bot authored Feb 22, 2019
2 parents f78e392 + fe4314d commit 7558e2e
Showing 65 changed files with 465 additions and 235 deletions.
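
The pattern applied throughout this commit: rather than emitting Hiera data for an addon only while it is enabled (which left previously applied manifests running in the cluster after the addon was turned off), the generator now always emits an explicit ensure value of "present" or "absent", so the Puppet side can actively remove the Kubernetes resources of a disabled service. A minimal sketch of the before/after shape, using an illustrative hieraData stand-in rather than the real tarmak types:

	package main

	import "fmt"

	// illustrative stand-in for the generator's hiera collector
	type hieraData struct {
		classes   []string
		variables []string
	}

	// before: data was only emitted while enabled; disabling an addon
	// simply stopped managing it, leaving its resources behind
	func emitBefore(h *hieraData, enabled bool) {
		if enabled {
			h.classes = append(h.classes, `kubernetes_addons::dashboard`)
		}
	}

	// after: the class is always included and an explicit ensure value
	// is emitted, so the Puppet module can delete the resources
	func emitAfter(h *hieraData, enabled bool) {
		h.classes = append(h.classes, `kubernetes_addons::dashboard`)
		if enabled {
			h.variables = append(h.variables, `kubernetes_addons::dashboard::ensure: "present"`)
		} else {
			h.variables = append(h.variables, `kubernetes_addons::dashboard::ensure: "absent"`)
		}
	}

	func main() {
		var h hieraData
		emitAfter(&h, false)
		fmt.Println(h.variables) // [kubernetes_addons::dashboard::ensure: "absent"]
	}
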
113 changes: 70 additions & 43 deletions pkg/puppet/puppet.go
@@ -174,13 +174,16 @@ func kubernetesClusterConfig(conf *clusterv1alpha1.ClusterKubernetes, hieraData
 	}

 	// enable prometheus if set, default: enabled
+	hieraData.classes = append(hieraData.classes, `prometheus`)
 	if conf.Prometheus == nil || conf.Prometheus.Enabled {
 		mode := clusterv1alpha1.PrometheusModeFull
 		if conf.Prometheus != nil && conf.Prometheus.Mode != "" {
 			mode = conf.Prometheus.Mode
 		}
 		hieraData.variables = append(hieraData.variables, fmt.Sprintf("prometheus::mode: %s", mode))
-		hieraData.classes = append(hieraData.classes, `prometheus`)
+		hieraData.variables = append(hieraData.variables, `prometheus::ensure: "present"`)
+	} else {
+		hieraData.variables = append(hieraData.variables, `prometheus::ensure: "absent"`)
 	}

 	globalGates := make(map[string]bool)
@@ -275,35 +278,40 @@ func kubernetesClusterConfigPerRole(conf *clusterv1alpha1.ClusterKubernetes, rol
 		return
 	}

-	if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.ClusterAutoscaler != nil && conf.ClusterAutoscaler.Enabled {
+	if roleName == clusterv1alpha1.KubernetesMasterRoleName {
 		hieraData.classes = append(hieraData.classes, `kubernetes_addons::cluster_autoscaler`)
-		if conf.ClusterAutoscaler.Image != "" {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::image: "%s"`, conf.ClusterAutoscaler.Image))
-		}
-		if conf.ClusterAutoscaler.Version != "" {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::version: "%s"`, conf.ClusterAutoscaler.Version))
-		}
+		if conf.ClusterAutoscaler != nil && conf.ClusterAutoscaler.Enabled {
+			hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::ensure: "present"`)
+			if conf.ClusterAutoscaler.Image != "" {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::image: "%s"`, conf.ClusterAutoscaler.Image))
+			}
+			if conf.ClusterAutoscaler.Version != "" {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::version: "%s"`, conf.ClusterAutoscaler.Version))
+			}

-		if conf.ClusterAutoscaler.ScaleDownUtilizationThreshold != nil {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::scale_down_utilization_threshold: %v`, *conf.ClusterAutoscaler.ScaleDownUtilizationThreshold))
-		}
+			if conf.ClusterAutoscaler.ScaleDownUtilizationThreshold != nil {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::scale_down_utilization_threshold: %v`, *conf.ClusterAutoscaler.ScaleDownUtilizationThreshold))
+			}

-		if conf.ClusterAutoscaler.Overprovisioning != nil && conf.ClusterAutoscaler.Overprovisioning.Enabled {
-			hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::enable_overprovisioning: true`)
-			hieraData.variables = append(hieraData.variables, `kubernetes::enable_pod_priority: true`)
+			if conf.ClusterAutoscaler.Overprovisioning != nil && conf.ClusterAutoscaler.Overprovisioning.Enabled {
+				hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::enable_overprovisioning: true`)
+				hieraData.variables = append(hieraData.variables, `kubernetes::enable_pod_priority: true`)

-			if conf.ClusterAutoscaler.Overprovisioning.Image != "" {
-				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_image: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Image))
-			}
-			if conf.ClusterAutoscaler.Overprovisioning.Version != "" {
-				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_version: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Version))
-			}
+				if conf.ClusterAutoscaler.Overprovisioning.Image != "" {
+					hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_image: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Image))
+				}
+				if conf.ClusterAutoscaler.Overprovisioning.Version != "" {
+					hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::proportional_version: "%s"`, conf.ClusterAutoscaler.Overprovisioning.Version))
+				}

-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_millicores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica))
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_megabytes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica))
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::cores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.CoresPerReplica))
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::nodes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.NodesPerReplica))
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::replica_count: %d`, conf.ClusterAutoscaler.Overprovisioning.ReplicaCount))
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_millicores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMillicoresPerReplica))
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::reserved_megabytes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.ReservedMegabytesPerReplica))
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::cores_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.CoresPerReplica))
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::nodes_per_replica: %d`, conf.ClusterAutoscaler.Overprovisioning.NodesPerReplica))
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::cluster_autoscaler::replica_count: %d`, conf.ClusterAutoscaler.Overprovisioning.ReplicaCount))
 			}
+		} else {
+			hieraData.variables = append(hieraData.variables, `kubernetes_addons::cluster_autoscaler::ensure: "absent"`)
+		}
 	}

@@ -313,36 +321,52 @@ func kubernetesClusterConfigPerRole(conf *clusterv1alpha1.ClusterKubernetes, rol
 		}
 	}

-	if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.Tiller != nil && conf.Tiller.Enabled {
+	if roleName == clusterv1alpha1.KubernetesMasterRoleName {
 		hieraData.classes = append(hieraData.classes, `kubernetes_addons::tiller`)
-		if conf.Tiller.Image != "" {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::image: "%s"`, conf.Tiller.Image))
-		}
-		if conf.Tiller.Version != "" {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::version: "%s"`, conf.Tiller.Version))
+		if conf.Tiller != nil && conf.Tiller.Enabled {
+			hieraData.variables = append(hieraData.variables, `kubernetes_addons::tiller::ensure: "present"`)
+			if conf.Tiller.Image != "" {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::image: "%s"`, conf.Tiller.Image))
+			}
+			if conf.Tiller.Version != "" {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::tiller::version: "%s"`, conf.Tiller.Version))
+			}
+		} else {
+			hieraData.variables = append(hieraData.variables, `kubernetes_addons::tiller::ensure: "absent"`)
 		}
 	}

-	if roleName == clusterv1alpha1.KubernetesMasterRoleName && conf.Dashboard != nil && conf.Dashboard.Enabled {
+	if roleName == clusterv1alpha1.KubernetesMasterRoleName {
 		hieraData.classes = append(hieraData.classes, `kubernetes_addons::dashboard`)
-		if conf.Dashboard.Image != "" {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::image: "%s"`, conf.Dashboard.Image))
-		}
-		if conf.Dashboard.Version != "" {
-			hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::version: "%s"`, conf.Dashboard.Version))
+		if conf.Dashboard != nil && conf.Dashboard.Enabled {
+			hieraData.variables = append(hieraData.variables, `kubernetes_addons::dashboard::ensure: "present"`)
+			if conf.Dashboard.Image != "" {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::image: "%s"`, conf.Dashboard.Image))
+			}
+			if conf.Dashboard.Version != "" {
+				hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::dashboard::version: "%s"`, conf.Dashboard.Version))
+			}
+		} else {
+			hieraData.variables = append(hieraData.variables, `kubernetes_addons::dashboard::ensure: "absent"`)
 		}
 	}

-	if g := conf.Grafana; g != nil {
-		hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::grafana::enabled: %t`, conf.Grafana.Enabled))
+	if g := conf.Grafana; g != nil && conf.Grafana.Enabled {
+		hieraData.variables = append(hieraData.variables, `kubernetes_addons::grafana::ensure: "present"`)
+	} else {
+		hieraData.variables = append(hieraData.variables, `kubernetes_addons::grafana::ensure: "absent"`)
 	}

-	if h := conf.Heapster; h != nil {
-		hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::heapster::enabled: %t`, conf.Heapster.Enabled))
+	if h := conf.Heapster; h != nil && conf.Heapster.Enabled {
+		hieraData.variables = append(hieraData.variables, `kubernetes_addons::heapster::ensure: "present"`)
+	} else {
+		hieraData.variables = append(hieraData.variables, `kubernetes_addons::heapster::ensure: "absent"`)
 	}

-	if i := conf.Heapster; i != nil {
-		hieraData.variables = append(hieraData.variables, fmt.Sprintf(`kubernetes_addons::influxdb::enabled: %t`, conf.InfluxDB.Enabled))
+	if i := conf.Heapster; i != nil && conf.InfluxDB.Enabled {
+		hieraData.variables = append(hieraData.variables, `kubernetes_addons::influxdb::ensure: "present"`)
+	} else {
+		hieraData.variables = append(hieraData.variables, `kubernetes_addons::influxdb::ensure: "absent"`)
 	}

 	return
@@ -378,6 +402,9 @@ func (p *Puppet) contentClusterConfig(cluster interfaces.Cluster) ([]string, err
 			return nil, fmt.Errorf("unable to marshall logging sinks: %s", err)
 		}
 		hieraData.variables = append(hieraData.variables, fmt.Sprintf(`tarmak::fluent_bit_configs: %s`, string(jsonLoggingSink)))
+		hieraData.variables = append(hieraData.variables, `fluent_bit::ensure: "present"`)
+	} else {
+		hieraData.variables = append(hieraData.variables, `fluent_bit::ensure: "absent"`)
 	}

 	if v := cluster.Config().VaultHelper; v != nil {
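
One detail worth noting in kubernetesClusterConfigPerRole: the Enabled check moved inside the role check, so a master now receives an explicit ensure value even when the addon block is missing from the cluster config entirely (nil), while non-master roles still emit nothing. A condensed sketch of that control flow, with an illustrative Addon type standing in for conf.Tiller, conf.Dashboard, and friends:

	// Addon is an illustrative stand-in for conf.Tiller, conf.Dashboard, etc.
	type Addon struct{ Enabled bool }

	// ensureFor mirrors the new guard structure: the role check gates
	// emission, the nil/Enabled check only picks the ensure value.
	func ensureFor(isMaster bool, a *Addon) (value string, emit bool) {
		if !isMaster {
			return "", false // non-master roles emit nothing, as before
		}
		if a != nil && a.Enabled {
			return `ensure: "present"`, true
		}
		// nil or explicitly disabled: masters now emit an explicit absent
		return `ensure: "absent"`, true
	}
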
17 changes: 12 additions & 5 deletions puppet/modules/aws_es_proxy/manifests/instance.pp
@@ -3,8 +3,7 @@
   Boolean $tls = true,
   Integer $dest_port = 9200,
   Integer $listen_port = 9200,
-  Enum['running', 'stopped'] $ensure_service = 'running',
-  Boolean $enable_service = true
+  Enum['present', 'absent'] $ensure = 'present'
 ){
   include ::aws_es_proxy

@@ -27,23 +26,31 @@
     }
   }

+  if $ensure == 'present' {
+    $service_ensure = 'running'
+    $service_enable = true
+  } else {
+    $service_ensure = 'stopped'
+    $service_enable = false
+  }
+
   exec { "${service_name}-systemctl-daemon-reload":
     command     => 'systemctl daemon-reload',
     refreshonly => true,
     path        => $path,
   }

   file{ "/etc/systemd/system/${service_name}.service":
-    ensure  => 'file',
+    ensure  => file,
     mode    => '0644',
     owner   => 'root',
     group   => 'root',
     content => template('aws_es_proxy/aws-es-proxy.service.erb'),
     notify  => Exec["${service_name}-systemctl-daemon-reload"]
   }
   ~> service { "${service_name}.service":
-    ensure     => $ensure_service,
-    enable     => $enable_service,
+    ensure     => $service_ensure,
+    enable     => $service_enable,
     hasstatus  => true,
     hasrestart => true,
     subscribe  => Class['aws_es_proxy::install']
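
The aws_es_proxy change collapses the two service parameters ($ensure_service and $enable_service) into a single $ensure enum and derives the systemd state from it inside the manifest. The mapping it introduces is equivalent to this small sketch (a hypothetical helper, for illustration only):

	// serviceState mirrors the new instance.pp logic: one ensure value
	// fans out to the systemd service state and its enablement.
	func serviceState(ensure string) (state string, enable bool) {
		if ensure == "present" {
			return "running", true
		}
		return "stopped", false
	}
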
2 changes: 2 additions & 0 deletions puppet/modules/calico/manifests/config.pp
@@ -17,6 +17,7 @@
   }

   kubernetes::apply{'calico-config':
+    ensure    => 'present',
     manifests => [
       template('calico/configmap_etcd.yaml.erb'),
     ],
@@ -26,6 +27,7 @@
   $pod_network = $::calico::pod_network

   kubernetes::apply{'calico-config':
+    ensure    => 'present',
     manifests => [
       template('calico/configmap_kubernetes.yaml.erb'),
     ],
@@ -41,6 +41,7 @@
   $aws_region = $::ec2_metadata['placement']['availability-zone'][0,-2]

   kubernetes::apply{'disable-srcdest-node':
+    ensure    => 'present',
     manifests => [
       template('calico/disable-source-destination.yaml.erb'),
     ],
1 change: 1 addition & 0 deletions puppet/modules/calico/manifests/node.pp
@@ -60,6 +60,7 @@
   }

   kubernetes::apply{'calico-node':
+    ensure    => 'present',
     manifests => [
       template('calico/node-rbac.yaml.erb'),
       template('calico/node-daemonset.yaml.erb'),
14 changes: 9 additions & 5 deletions puppet/modules/calico/manifests/policy_controller.pp
@@ -23,18 +23,22 @@

   if $backend == 'etcd' {
     $namespace = $::calico::namespace
+    $ensure = 'present'

     if $::calico::etcd_proto == 'https' {
       $etcd_tls_dir = $::calico::etcd_tls_dir
       $tls = true
     } else {
       $tls = false
     }
+  } else {
+    $ensure = 'absent'
   }

-  kubernetes::apply{'calico-policy-controller':
-    manifests => [
-      template('calico/policy-controller-deployment.yaml.erb'),
-    ],
-  }
+  kubernetes::apply{'calico-policy-controller':
+    ensure    => $ensure,
+    manifests => [
+      template('calico/policy-controller-deployment.yaml.erb'),
+    ],
+  }
 }
7 changes: 5 additions & 2 deletions puppet/modules/calico/spec/classes/config_spec.rb
@@ -8,9 +8,12 @@
       "
       class kubernetes{}
       define kubernetes::apply(
-        $manifests,
+        Enum['present', 'absent'] $ensure = 'present',
+        $manifests,
       ){
-        kubernetes::addon_manager_labels($manifests[0])
+        if $manifests and $ensure == 'present' {
+          kubernetes::addon_manager_labels($manifests[0])
+        }
       }
       class{'calico':
         #{cloud_provider}
14 changes: 10 additions & 4 deletions puppet/modules/calico/spec/classes/init_spec.rb
@@ -7,9 +7,12 @@ class kubernetes{
         $version = '1.7.10'
       }
       define kubernetes::apply(
-        $manifests,
+        Enum['present', 'absent'] $ensure = 'present',
+        $manifests,
       ){
-        kubernetes::addon_manager_labels($manifests[0])
+        if $manifests and $ensure == 'present' {
+          kubernetes::addon_manager_labels($manifests[0])
+        }
       }
       "
     end
@@ -55,9 +58,12 @@ class kubernetes{
       class kubernetes::apiserver{}
       include kubernetes::apiserver
       define kubernetes::apply(
-        $manifests,
+        Enum['present', 'absent'] $ensure = 'present',
+        $manifests,
       ){
-        kubernetes::addon_manager_labels($manifests[0])
+        if $manifests and $ensure == 'present' {
+          kubernetes::addon_manager_labels($manifests[0])
+        }
       }
       "
     end
7 changes: 5 additions & 2 deletions puppet/modules/calico/spec/classes/node_spec.rb
@@ -21,9 +21,12 @@ class kubernetes{
         $_apiserver_insecure_port = 1000
       }
       define kubernetes::apply(
-        $manifests,
+        Enum['present', 'absent'] $ensure = 'present',
+        $manifests,
       ){
-        kubernetes::addon_manager_labels($manifests[0])
+        if $manifests and $ensure == 'present' {
+          kubernetes::addon_manager_labels($manifests[0])
+        }
       }
       class{'calico':
         #{mtu}
7 changes: 5 additions & 2 deletions puppet/modules/calico/spec/classes/policy_controller_spec.rb
@@ -16,9 +16,12 @@ class kubernetes{
         $version = '#{kubernetes_version}'
       }
       define kubernetes::apply(
-        $manifests,
+        Enum['present', 'absent'] $ensure = 'present',
+        $manifests,
      ){
-        kubernetes::addon_manager_labels($manifests[0])
+        if $manifests and $ensure == 'present' {
+          kubernetes::addon_manager_labels($manifests[0])
+        }
       }
       class{'calico':
         #{mtu}
2 changes: 1 addition & 1 deletion puppet/modules/fluent_bit/manifests/config.pp
@@ -8,7 +8,7 @@
   }

   file { '/etc/td-agent-bit/td-agent-bit.conf':
-    ensure  => file,
+    ensure  => $::fluent_bit::ensure,
     mode    => '0644',
     owner   => 'root',
     group   => 'root',
1 change: 1 addition & 0 deletions puppet/modules/fluent_bit/manifests/daemonset.pp
@@ -28,6 +28,7 @@
   }

   kubernetes::apply{'fluent-bit':
+    ensure    => $::fluent_bit::ensure,
     manifests => [
       template('fluent_bit/fluent-bit-configmap.yaml.erb'),
       template('fluent_bit/fluent-bit-daemonset.yaml.erb'),
1 change: 1 addition & 0 deletions puppet/modules/fluent_bit/manifests/init.pp
@@ -1,6 +1,7 @@
 class fluent_bit (
   $package_name = $::fluent_bit::params::package_name,
   $service_name = $::fluent_bit::params::service_name,
+  Enum['present', 'absent'] $ensure = 'present',
 ) inherits ::fluent_bit::params {

   $path = defined('$::path') ? {