diff --git a/build.rs b/build.rs index f25018c6..f9718357 100644 --- a/build.rs +++ b/build.rs @@ -21,6 +21,7 @@ fn generate_protobuf_code() -> Result<()> { prost_build.compile_protos( &[ + "k8s.io/api/batch/v1/generated.proto", "k8s.io/api/core/v1/generated.proto", "k8s.io/api/admissionregistration/v1/generated.proto", "k8s.io/api/apps/v1/generated.proto", diff --git a/hack/dummy_config.yaml b/hack/dummy_config.yaml new file mode 100644 index 00000000..f19baffe --- /dev/null +++ b/hack/dummy_config.yaml @@ -0,0 +1,100 @@ +dry_run: false +etcd_endpoint: localhost:2379 +crypto_dirs: +- backup/etc/kubernetes +- backup/var/lib/kubelet +- backup/etc/machine-config-daemon +crypto_files: +- backup/etc/mcs-machine-config-content.json +cluster_customization_dirs: +- backup/etc/kubernetes +- backup/var/lib/kubelet +- backup/etc/machine-config-daemon +- backup/etc/pki/ca-trust +cluster_customization_files: +- backup/etc/mcs-machine-config-content.json +- backup/etc/mco/proxy.env +cn_san_replace_rules: +- api-int.seed.redhat.com:api-int.new-name.foo.com +- api.seed.redhat.com:api.new-name.foo.com +- "*.apps.seed.redhat.com:*.apps.new-name.foo.com" +- 192.168.126.10:192.168.127.11 +use_cert_rules: +- | + -----BEGIN CERTIFICATE----- + MIICyzCCAbMCFAoie5EUqnUAHimqxbJBHV0MGVbwMA0GCSqGSIb3DQEBCwUAMCIx + IDAeBgNVBAMMF2FkbWluLWt1YmVjb25maWctc2lnbmVyMB4XDTI0MDEwOTEzMTky + NVoXDTI0MDIwODEzMTkyNVowIjEgMB4GA1UEAwwXYWRtaW4ta3ViZWNvbmZpZy1z + aWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2fz96uc8fDoNV + RaBB9iQ+i5Y76IZf0XOdGID8WVaqPlqH+NgLUaFa39T+78FhZW3794Lbeyu/PnYT + ufMyKnJEulVO7W7gPHaqWyuN08/m6SH5ycTEgUAXK1q1yVR/vM6HnV/UPUCfbDaW + RFOrUgGNwNywhEjqyzyUxJFixxS6Rk7JmouROD2ciNhBn6wNFByVHN9j4nQUOhXC + A0JjuiPH7ybvcHjmg3mKDJusyVq4pl0faahOxn0doILfXaHHwRxyEnP3V3arpPer + FvwlHh2Cfat+ijFPSD9pN3KmoeAviOHZVLQ/jKzkQvzlvva3mhEpLE5Zje1lMpvq + fjDheW9bAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAC7oi/Ht0lidcx6XvOBz6W1m + LU02e2yHuDzw6E3WuNoqAdPpleFRV4mLDnv8mEavH5sje0L5veHtOq3Ny4pc06B+ + ETB2aCW4GQ4mPvN9Jyi6sxLQQaVLpFrtPPB08NawNbbcYWUrAihO1uIXLhaCYZWw + H3aWlqRvGECazYZIPcFoV20jygrcwMhixSZjYyHhJN0LYO5sjiKcMnI8EkHuqE17 + 7CPogicZte+m49Mo+f7b8asmKBSafdTUSVAt9Q3Fc3PTJSMW5lxfx1vIR/og33WJ + BgIejfD1dYW2Fp02z5sF6Pw6vhobpfDYgsTAKNonh5P6NxMiD14eQxYrNJ6DAF0= + -----END CERTIFICATE----- +cluster_rename: new-name:foo.com:some-random-infra-id +hostname: test.hostname +ip: 192.168.126.99 +proxy: http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3128|http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3130|.cluster.local,.kni-qe-2.lab.eng.rdu2.redhat.com,.svc,127.0.0.1,2620:52:0:11c::/64,2620:52:0:11c::1,2620:52:0:11c::10,2620:52:0:11c::11,2620:52:0:199::/64,api-int.kni-qe-2.lab.eng.rdu2.redhat.com,fd01::/48,fd02::/112,localhost +kubeadmin_password_hash: "$2a$10$20Q4iRLy7cWZkjn/D07bF.RZQZonKwstyRGH0qiYbYRkx5Pe4Ztyi" +additional_trust_bundle: | + # Foo + -----BEGIN CERTIFICATE----- + MIIDZTCCAk2gAwIBAgIUP+AxIkXJXTEhNGLH2qjmE6Gp0fowDQYJKoZIhvcNAQEL + BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE + CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yNDAzMDExMDIyNTlaFw0yNTAzMDEx + MDIyNTlaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa + BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB + DwAwggEKAoIBAQC0wzg+7X2Amb5g60g0TstLgC0XnJRZq/YZUUsJMmm3qMb/+GYJ + AJzHxiycUfbRJtYvjx0SBmAX/kDRVCEQKcN5d/y3zeq709YO40kvouScfstsxM8l + PFLOmM8/Dqey1WblSJERBLbLherDnMwR7EMXkyZ/AfHUXmhVoIZE9ywsZpNcVW6Z + 7x/+Izbj1s305vrxEkZDw6b3oMG5uooQgP5NZFXSamzJgviP0L/usvbRMtAWphoj + WhMeNuOdymLwRzm2l+2Qp/JDWktgHccmrbbi1c6pwhsIJBj4KOyb9zROTnYXyS/j + 
0b7GzVcffveV6E58rGa2ILyIsCv6gt8LgFnxAgMBAAGjUzBRMB0GA1UdDgQWBBQ5 + nh0SeZxZ969ps+9ywPEoOVasxTAfBgNVHSMEGDAWgBQ5nh0SeZxZ969ps+9ywPEo + OVasxTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAWxsqfdm8h + AeNY8vPcRdtB9KYU5sZLs4NtlBFSdn+pHmeXZwwkjQDNhVcEMKdpZI4BpS11Ggwh + 1d3BCCC/5M6yVqm+EMKJvA9VCeM8d1WJ1yVyXcgGuegf8kr3v+lr7Ll59qZGP5Ir + WwE8WRns7uFOCqYJCxo1VFXitZZuIugr3NUSimBPoJf1hDYdye3K3Q+grF2GyNII + 5Yo+/VSR4ejIvJYAFp91Ycep7S0/+qhFpsjEG0Qw3Ly6WqQoCqdmIsyqFgWHsIlY + oJxV5wTX/c9DDZLR0VUD19aDV3B9kb7Cf+h7S4RsORWCyi7+58FKkkD6Ryc0I1K6 + xw3RWhfd9o1d + -----END CERTIFICATE----- + + + + # All + # the Bars + -----BEGIN CERTIFICATE----- + MIIDZTCCAk2gAwIBAgIULnisjJLte3Vvt4o1f+5vSQg542cwDQYJKoZIhvcNAQEL + BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE + CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yNDAzMDExMDI1MDFaFw0yNTAzMDEx + MDI1MDFaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa + BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB + DwAwggEKAoIBAQC2dhK7xTnoTB3wN1l3NsLTp5YR0KFfBTjMcDgSzUy/GN79c2cF + JzSuiYUi7SCmFjn3soNqpXHFzCox6KIs9R6PL4epaQM76EVG/Xy6mdDvFnZvqypi + wmK6J0AGajOxItYUGb2a3Zmt/2nliW6t8sW/vhovHRu7YROo4uJygIp2UUFct2Lk + 8C7XkJX5RXW+sKTiNddIjhmDFD0vHfvNvQ6AIayJTmXy272+aqYNJWB2wS/2uD3Z + +WOpiINetCtkASoiE7nzBQw+WsTfeFJH2TnI5pnSaHdLRUQtzoLO0/FgQ5WBfJg5 + aH03DLfQ9GEdzlsOkPOEgHXqDFMjTQCwcue3AgMBAAGjUzBRMB0GA1UdDgQWBBRd + 0Zs+cm0gPHGKoQrerC18Pa3B3zAfBgNVHSMEGDAWgBRd0Zs+cm0gPHGKoQrerC18 + Pa3B3zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAepPrWqB9h + JkqtgJrP8SkQVulTVKYj66J5JxM5vZR96Z4UnbA3WNxezev0jMCYuV0twHPN8avs + Jern+/n7vgQ3ziiLVdtrN8PqK1X1apSurVmaiIw4tRcv5TVL5OD95sTyJh5bUBpM + DGtCTraPZxLIDKm9byunobXtJVcutw4oHKtFy/LlFWePCnvFzvx6ZFswLAXgxhf9 + EtjDf3v0cjDn9yRzjYFrwHiQ53A75YTwFyk21q7Gh1G0yspfBeq7cej2wK1PnfiC + 42TI0UzcqRV4CWDoARMSV8yMLajZ0g1eEreUprwmFcOy17V7KCeV6E8lKb21OU8M + Ad9q3H0iXjct + -----END CERTIFICATE----- +summary_file: summary.yaml +summary_file_clean: summary_redacted.yaml +extend_expiration: true +force_expire: false +pull_secret: '{"auths":{"empty_registry":{"username":"empty","password":"empty","auth":"ZW1wdHk6ZW1wdHk=","email":""}}}' +threads: 1 diff --git a/run_seed.sh b/run_seed.sh index 95e3e498..3eda2221 100755 --- a/run_seed.sh +++ b/run_seed.sh @@ -67,106 +67,7 @@ sudo unshare --mount -- bash -c "mount --bind /dev/null .cargo/config.toml && su if [[ -n "$WITH_CONFIG" ]]; then echo "Using config" # shellcheck disable=2016 - RECERT_CONFIG=<(echo ' -dry_run: false -etcd_endpoint: localhost:2379 -crypto_dirs: -- backup/etc/kubernetes -- backup/var/lib/kubelet -- backup/etc/machine-config-daemon -crypto_files: -- backup/etc/mcs-machine-config-content.json -cluster_customization_dirs: -- backup/etc/kubernetes -- backup/var/lib/kubelet -- backup/etc/machine-config-daemon -- backup/etc/pki/ca-trust -cluster_customization_files: -- backup/etc/mcs-machine-config-content.json -cn_san_replace_rules: -- api-int.seed.redhat.com:api-int.new-name.foo.com -- api.seed.redhat.com:api.new-name.foo.com -- "*.apps.seed.redhat.com:*.apps.new-name.foo.com" -- 192.168.126.10:192.168.127.11 -use_cert_rules: -- | - -----BEGIN CERTIFICATE----- - MIICyzCCAbMCFAoie5EUqnUAHimqxbJBHV0MGVbwMA0GCSqGSIb3DQEBCwUAMCIx - IDAeBgNVBAMMF2FkbWluLWt1YmVjb25maWctc2lnbmVyMB4XDTI0MDEwOTEzMTky - NVoXDTI0MDIwODEzMTkyNVowIjEgMB4GA1UEAwwXYWRtaW4ta3ViZWNvbmZpZy1z - aWduZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC2fz96uc8fDoNV - RaBB9iQ+i5Y76IZf0XOdGID8WVaqPlqH+NgLUaFa39T+78FhZW3794Lbeyu/PnYT - ufMyKnJEulVO7W7gPHaqWyuN08/m6SH5ycTEgUAXK1q1yVR/vM6HnV/UPUCfbDaW - 
RFOrUgGNwNywhEjqyzyUxJFixxS6Rk7JmouROD2ciNhBn6wNFByVHN9j4nQUOhXC - A0JjuiPH7ybvcHjmg3mKDJusyVq4pl0faahOxn0doILfXaHHwRxyEnP3V3arpPer - FvwlHh2Cfat+ijFPSD9pN3KmoeAviOHZVLQ/jKzkQvzlvva3mhEpLE5Zje1lMpvq - fjDheW9bAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAC7oi/Ht0lidcx6XvOBz6W1m - LU02e2yHuDzw6E3WuNoqAdPpleFRV4mLDnv8mEavH5sje0L5veHtOq3Ny4pc06B+ - ETB2aCW4GQ4mPvN9Jyi6sxLQQaVLpFrtPPB08NawNbbcYWUrAihO1uIXLhaCYZWw - H3aWlqRvGECazYZIPcFoV20jygrcwMhixSZjYyHhJN0LYO5sjiKcMnI8EkHuqE17 - 7CPogicZte+m49Mo+f7b8asmKBSafdTUSVAt9Q3Fc3PTJSMW5lxfx1vIR/og33WJ - BgIejfD1dYW2Fp02z5sF6Pw6vhobpfDYgsTAKNonh5P6NxMiD14eQxYrNJ6DAF0= - -----END CERTIFICATE----- -cluster_rename: new-name:foo.com:some-random-infra-id -hostname: test.hostname -ip: 192.168.126.99 -kubeadmin_password_hash: "$2a$10$20Q4iRLy7cWZkjn/D07bF.RZQZonKwstyRGH0qiYbYRkx5Pe4Ztyi" -additional_trust_bundle: | - # Foo - -----BEGIN CERTIFICATE----- - MIIDZTCCAk2gAwIBAgIUP+AxIkXJXTEhNGLH2qjmE6Gp0fowDQYJKoZIhvcNAQEL - BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE - CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yNDAzMDExMDIyNTlaFw0yNTAzMDEx - MDIyNTlaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa - BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB - DwAwggEKAoIBAQC0wzg+7X2Amb5g60g0TstLgC0XnJRZq/YZUUsJMmm3qMb/+GYJ - AJzHxiycUfbRJtYvjx0SBmAX/kDRVCEQKcN5d/y3zeq709YO40kvouScfstsxM8l - PFLOmM8/Dqey1WblSJERBLbLherDnMwR7EMXkyZ/AfHUXmhVoIZE9ywsZpNcVW6Z - 7x/+Izbj1s305vrxEkZDw6b3oMG5uooQgP5NZFXSamzJgviP0L/usvbRMtAWphoj - WhMeNuOdymLwRzm2l+2Qp/JDWktgHccmrbbi1c6pwhsIJBj4KOyb9zROTnYXyS/j - 0b7GzVcffveV6E58rGa2ILyIsCv6gt8LgFnxAgMBAAGjUzBRMB0GA1UdDgQWBBQ5 - nh0SeZxZ969ps+9ywPEoOVasxTAfBgNVHSMEGDAWgBQ5nh0SeZxZ969ps+9ywPEo - OVasxTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAWxsqfdm8h - AeNY8vPcRdtB9KYU5sZLs4NtlBFSdn+pHmeXZwwkjQDNhVcEMKdpZI4BpS11Ggwh - 1d3BCCC/5M6yVqm+EMKJvA9VCeM8d1WJ1yVyXcgGuegf8kr3v+lr7Ll59qZGP5Ir - WwE8WRns7uFOCqYJCxo1VFXitZZuIugr3NUSimBPoJf1hDYdye3K3Q+grF2GyNII - 5Yo+/VSR4ejIvJYAFp91Ycep7S0/+qhFpsjEG0Qw3Ly6WqQoCqdmIsyqFgWHsIlY - oJxV5wTX/c9DDZLR0VUD19aDV3B9kb7Cf+h7S4RsORWCyi7+58FKkkD6Ryc0I1K6 - xw3RWhfd9o1d - -----END CERTIFICATE----- - - - - # All - # the Bars - -----BEGIN CERTIFICATE----- - MIIDZTCCAk2gAwIBAgIULnisjJLte3Vvt4o1f+5vSQg542cwDQYJKoZIhvcNAQEL - BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE - CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yNDAzMDExMDI1MDFaFw0yNTAzMDEx - MDI1MDFaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa - BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB - DwAwggEKAoIBAQC2dhK7xTnoTB3wN1l3NsLTp5YR0KFfBTjMcDgSzUy/GN79c2cF - JzSuiYUi7SCmFjn3soNqpXHFzCox6KIs9R6PL4epaQM76EVG/Xy6mdDvFnZvqypi - wmK6J0AGajOxItYUGb2a3Zmt/2nliW6t8sW/vhovHRu7YROo4uJygIp2UUFct2Lk - 8C7XkJX5RXW+sKTiNddIjhmDFD0vHfvNvQ6AIayJTmXy272+aqYNJWB2wS/2uD3Z - +WOpiINetCtkASoiE7nzBQw+WsTfeFJH2TnI5pnSaHdLRUQtzoLO0/FgQ5WBfJg5 - aH03DLfQ9GEdzlsOkPOEgHXqDFMjTQCwcue3AgMBAAGjUzBRMB0GA1UdDgQWBBRd - 0Zs+cm0gPHGKoQrerC18Pa3B3zAfBgNVHSMEGDAWgBRd0Zs+cm0gPHGKoQrerC18 - Pa3B3zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAepPrWqB9h - JkqtgJrP8SkQVulTVKYj66J5JxM5vZR96Z4UnbA3WNxezev0jMCYuV0twHPN8avs - Jern+/n7vgQ3ziiLVdtrN8PqK1X1apSurVmaiIw4tRcv5TVL5OD95sTyJh5bUBpM - DGtCTraPZxLIDKm9byunobXtJVcutw4oHKtFy/LlFWePCnvFzvx6ZFswLAXgxhf9 - EtjDf3v0cjDn9yRzjYFrwHiQ53A75YTwFyk21q7Gh1G0yspfBeq7cej2wK1PnfiC - 42TI0UzcqRV4CWDoARMSV8yMLajZ0g1eEreUprwmFcOy17V7KCeV6E8lKb21OU8M - Ad9q3H0iXjct - -----END CERTIFICATE----- -summary_file: summary.yaml -summary_file_clean: summary_redacted.yaml -extend_expiration: true -force_expire: 
false -pull_secret: "{\"auths\":{\"empty_registry\":{\"username\":\"empty\",\"password\":\"empty\",\"auth\":\"ZW1wdHk6ZW1wdHk=\",\"email\":\"\"}}}" -threads: 1 -') cargo run --release + RECERT_CONFIG="$SCRIPT_DIR/hack/dummy_config.yaml" cargo run --release else # shellcheck disable=2016 cargo run -- \ @@ -182,6 +83,7 @@ else --cluster-customization-dir backup/etc/machine-config-daemon \ --cluster-customization-dir backup/etc/pki/ca-trust \ --cluster-customization-file backup/etc/mcs-machine-config-content.json \ + --cluster-customization-file backup/etc/mco/proxy.env \ \ --cn-san-replace api-int.seed.redhat.com:api-int.new-name.foo.com \ --cn-san-replace api.seed.redhat.com:api.new-name.foo.com \ @@ -192,6 +94,7 @@ else --cluster-rename new-name:foo.com:some-random-infra-id \ --hostname test.hostname \ --ip 192.168.126.99 \ + --proxy 'http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3128|http://registry.kni-qe-0.lab.eng.rdu2.redhat.com:3130|.cluster.local,.kni-qe-2.lab.eng.rdu2.redhat.com,.svc,127.0.0.1,2620:52:0:11c::/64,2620:52:0:11c::1,2620:52:0:11c::10,2620:52:0:11c::11,2620:52:0:199::/64,api-int.kni-qe-2.lab.eng.rdu2.redhat.com,fd01::/48,fd02::/112,localhost' \ --kubeadmin-password-hash '$2a$10$20Q4iRLy7cWZkjn/D07bF.RZQZonKwstyRGH0qiYbYRkx5Pe4Ztyi' \ --additional-trust-bundle ./hack/dummy_trust_bundle.pem \ --pull-secret '{"auths":{"empty_registry":{"username":"empty","password":"empty","auth":"ZW1wdHk6ZW1wdHk=","email":""}}}' \ diff --git a/src/cluster_crypto/locations.rs b/src/cluster_crypto/locations.rs index 1242f0cd..48d62ad5 100644 --- a/src/cluster_crypto/locations.rs +++ b/src/cluster_crypto/locations.rs @@ -483,6 +483,7 @@ impl K8sResourceLocation { Some(apiversion_first_component_value) => { match apiversion_first_component_value { "operator.openshift.io" + | "monitoring.coreos.com" | "apiregistration.k8s.io" | "machineconfiguration.openshift.io" | "config.openshift.io" diff --git a/src/config.rs b/src/config.rs index 8df0a6e6..324bf275 100644 --- a/src/config.rs +++ b/src/config.rs @@ -2,7 +2,7 @@ use self::{cli::Cli, path::ConfigPath}; use crate::{ cluster_crypto::REDACT_SECRETS, cnsanreplace::{CnSanReplace, CnSanReplaceRules}, - ocp_postprocess::cluster_domain_rename::params::ClusterNamesRename, + ocp_postprocess::{cluster_domain_rename::params::ClusterNamesRename, proxy_rename::args::Proxy}, use_cert::{UseCert, UseCertRules}, use_key::{UseKey, UseKeyRules}, }; @@ -38,6 +38,7 @@ pub(crate) struct ClusterCustomizations { pub(crate) cluster_rename: Option, pub(crate) hostname: Option, pub(crate) ip: Option, + pub(crate) proxy: Option, pub(crate) kubeadmin_password_hash: Option, #[serde(serialize_with = "redact")] pub(crate) pull_secret: Option, @@ -138,6 +139,7 @@ impl RecertConfig { kubeadmin_password_hash: None, pull_secret: None, additional_trust_bundle: None, + proxy: None, }, threads: None, regenerate_server_ssh_keys: None, @@ -196,6 +198,12 @@ impl RecertConfig { Some(value) => Some(value.as_str().context("pull_secret must be a string")?.to_string()), None => None, }; + let proxy = match value.remove("proxy") { + Some(value) => Some( + Proxy::parse(value.as_str().context("proxy must be a string")?).context(format!("proxy {}", value.as_str().unwrap()))?, + ), + None => None, + }; let set_kubeadmin_password_hash = match value.remove("kubeadmin_password_hash") { Some(value) => Some(value.as_str().context("set_kubeadmin_password_hash must be a string")?.to_string()), None => None, @@ -257,6 +265,7 @@ impl RecertConfig { kubeadmin_password_hash: 
set_kubeadmin_password_hash, pull_secret, additional_trust_bundle, + proxy, }; let recert_config = Self { @@ -326,6 +335,7 @@ impl RecertConfig { cluster_rename: cli.cluster_rename, hostname: cli.hostname, ip: cli.ip, + proxy: cli.proxy, kubeadmin_password_hash: cli.kubeadmin_password_hash, pull_secret: cli.pull_secret, additional_trust_bundle: cli.additional_trust_bundle, diff --git a/src/config/cli.rs b/src/config/cli.rs index b5a41cb9..77ab696d 100644 --- a/src/config/cli.rs +++ b/src/config/cli.rs @@ -1,5 +1,8 @@ use crate::{ - cnsanreplace::CnSanReplace, ocp_postprocess::cluster_domain_rename::params::ClusterNamesRename, use_cert::UseCert, use_key::UseKey, + cnsanreplace::CnSanReplace, + ocp_postprocess::{cluster_domain_rename::params::ClusterNamesRename, proxy_rename::args::Proxy}, + use_cert::UseCert, + use_key::UseKey, }; use clap::Parser; use clio::ClioPath; @@ -67,6 +70,10 @@ pub(crate) struct Cli { #[clap(long)] pub(crate) ip: Option, + /// If given, the cluster's HTTP proxy configuration will be modified to use this one instead. + #[clap(long, value_parser = Proxy::parse)] + pub(crate) proxy: Option, + /// Modify the OCP kubeadmin password secret hash. If given but empty, the kubeadmin password /// secret will be deleted (thus disabling password login). If given and non-empty, the secret /// will be updated with the given password hash, unless no existing kubeadmin secret resource diff --git a/src/etcd_encoding.rs b/src/etcd_encoding.rs index f446cfc9..48aa2a6f 100644 --- a/src/etcd_encoding.rs +++ b/src/etcd_encoding.rs @@ -3,7 +3,8 @@ use super::protobuf_gen::{ k8s::io::{ api::{ admissionregistration::v1::{MutatingWebhookConfiguration, ValidatingWebhookConfiguration}, - apps::v1::{DaemonSet, Deployment}, + apps::v1::{ControllerRevision, DaemonSet, Deployment, StatefulSet}, + batch::v1::{CronJob, Job}, core::v1::{ConfigMap, Secret}, }, apimachinery::pkg::runtime::{TypeMeta, Unknown}, @@ -50,6 +51,10 @@ macro_rules! 
k8s_type { k8s_type!(RouteWithMeta, Route); k8s_type!(DaemonsSetWithMeta, DaemonSet); k8s_type!(DeploymentWithMeta, Deployment); +k8s_type!(ControllerRevisionWithMeta, ControllerRevision); +k8s_type!(JobWithMeta, Job); +k8s_type!(CronJobWithMeta, CronJob); +k8s_type!(StatefulSetWithMeta, StatefulSet); k8s_type!(ConfigMapWithMeta, ConfigMap); k8s_type!(SecretWithMeta, Secret); k8s_type!(ValidatingWebhookConfigurationWithMeta, ValidatingWebhookConfiguration); @@ -67,6 +72,10 @@ pub(crate) async fn decode(data: &[u8]) -> Result> { Ok(match kind { "Route" => serde_json::to_vec(&RouteWithMeta::try_from(unknown)?)?, "Deployment" => serde_json::to_vec(&DeploymentWithMeta::try_from(unknown)?)?, + "ControllerRevision" => serde_json::to_vec(&ControllerRevisionWithMeta::try_from(unknown)?)?, + "Job" => serde_json::to_vec(&JobWithMeta::try_from(unknown)?)?, + "CronJob" => serde_json::to_vec(&CronJobWithMeta::try_from(unknown)?)?, + "StatefulSet" => serde_json::to_vec(&StatefulSetWithMeta::try_from(unknown)?)?, "DaemonSet" => serde_json::to_vec(&DaemonsSetWithMeta::try_from(unknown)?)?, "ConfigMap" => serde_json::to_vec(&ConfigMapWithMeta::try_from(unknown)?)?, "Secret" => serde_json::to_vec(&SecretWithMeta::try_from(unknown)?)?, @@ -93,6 +102,10 @@ pub(crate) async fn encode(data: &[u8]) -> Result> { "Route" => Unknown::from(serde_json::from_slice::(data)?), "Secret" => Unknown::from(serde_json::from_slice::(data)?), "Deployment" => Unknown::from(serde_json::from_slice::(data)?), + "ControllerRevision" => Unknown::from(serde_json::from_slice::(data)?), + "Job" => Unknown::from(serde_json::from_slice::(data)?), + "CronJob" => Unknown::from(serde_json::from_slice::(data)?), + "StatefulSet" => Unknown::from(serde_json::from_slice::(data)?), "DaemonSet" => Unknown::from(serde_json::from_slice::(data)?), "ValidatingWebhookConfiguration" => Unknown::from(serde_json::from_slice::(data)?), "MutatingWebhookConfiguration" => Unknown::from(serde_json::from_slice::(data)?), diff --git a/src/ocp_postprocess.rs b/src/ocp_postprocess.rs index ef8c028a..e2da82d6 100644 --- a/src/ocp_postprocess.rs +++ b/src/ocp_postprocess.rs @@ -1,4 +1,4 @@ -use self::cluster_domain_rename::params::ClusterNamesRename; +use self::{cluster_domain_rename::params::ClusterNamesRename, proxy_rename::args::Proxy}; use crate::{ cluster_crypto::locations::K8sResourceLocation, config::{path::ConfigPath, ClusterCustomizations}, @@ -21,9 +21,11 @@ mod fnv; mod go_base32; pub(crate) mod hostname_rename; pub(crate) mod ip_rename; +pub(crate) mod proxy_rename; pub(crate) mod pull_secret_rename; /// Perform some OCP-related post-processing to make some OCP operators happy +#[allow(clippy::too_many_arguments)] pub(crate) async fn ocp_postprocess( in_memory_etcd_client: &Arc, cluster_customizations: &ClusterCustomizations, @@ -93,6 +95,12 @@ async fn run_cluster_customizations( .context("setting kubeadmin password hash")?; } + if let Some(proxy) = &cluster_customizations.proxy { + proxy_rename(in_memory_etcd_client, proxy, dirs, files) + .await + .context("renaming proxy")?; + } + if let Some(pull_secret) = &cluster_customizations.pull_secret { log::info!("setting new pull_secret"); pull_secret_rename(in_memory_etcd_client, pull_secret, dirs, files) @@ -505,3 +513,18 @@ pub(crate) async fn additional_trust_bundle_rename( Ok(()) } + +pub(crate) async fn proxy_rename( + in_memory_etcd_client: &Arc, + proxy: &Proxy, + static_dirs: &[ConfigPath], + static_files: &[ConfigPath], +) -> Result<()> { + let etcd_client = in_memory_etcd_client; + + 
proxy_rename::rename_all(etcd_client, proxy, static_dirs, static_files) + .await + .context("renaming all")?; + + Ok(()) +} diff --git a/src/ocp_postprocess/additional_trust_bundle.rs b/src/ocp_postprocess/additional_trust_bundle.rs index 6e08f170..dcbb0165 100644 --- a/src/ocp_postprocess/additional_trust_bundle.rs +++ b/src/ocp_postprocess/additional_trust_bundle.rs @@ -16,6 +16,11 @@ pub(crate) async fn rename_all( .await .context("renaming etcd resources")?; + let new_merged_bundle = match new_merged_bundle { + Some(bundle) => bundle, + None => return Ok(()), + }; + fix_filesystem_resources(&new_merged_bundle, additional_trust_bundle, static_dirs, static_files) .await .context("renaming filesystem resources")?; @@ -61,12 +66,17 @@ async fn fix_file_resources(_additional_trust_bundle: &str, _new_merged_bundle: Ok(()) } -async fn fix_etcd_resources(etcd_client: &Arc, additional_trust_bundle: &str) -> Result { +async fn fix_etcd_resources(etcd_client: &Arc, additional_trust_bundle: &str) -> Result> { // kubernetes.io/configmaps/openshift-config/custom-ca let original_additional_trust_bundle = etcd_rename::fix_original_additional_trust_bundle(etcd_client, additional_trust_bundle) .await .context("fixing labeled configmaps")?; + let original_additional_trust_bundle = match original_additional_trust_bundle { + Some(bundle) => bundle, + None => return Ok(None), + }; + let system_certs = utils::derive_system_certs_from_merged_bundle( original_additional_trust_bundle, utils::get_merged_bundle(etcd_client).await.context("getting merged bundle")?, @@ -91,5 +101,5 @@ async fn fix_etcd_resources(etcd_client: &Arc, additional_trust .await .context("fixing kcm openshift user ca")?; - Ok(new_merged_bundle) + Ok(Some(new_merged_bundle)) } diff --git a/src/ocp_postprocess/additional_trust_bundle/etcd_rename.rs b/src/ocp_postprocess/additional_trust_bundle/etcd_rename.rs index 4330504b..4114af3d 100644 --- a/src/ocp_postprocess/additional_trust_bundle/etcd_rename.rs +++ b/src/ocp_postprocess/additional_trust_bundle/etcd_rename.rs @@ -109,7 +109,10 @@ pub(crate) async fn fix_labeled_configmaps(etcd_client: &InMemoryK8sEtcd, full_m Ok(()) } -pub(crate) async fn fix_original_additional_trust_bundle(etcd_client: &InMemoryK8sEtcd, additional_trust_bundle: &str) -> Result { +pub(crate) async fn fix_original_additional_trust_bundle( + etcd_client: &InMemoryK8sEtcd, + additional_trust_bundle: &str, +) -> Result> { let proxy_config_k8s_resource_location = K8sResourceLocation::new(None, "Proxy", "cluster", "config.openshift.io"); let config = get_etcd_json(etcd_client, &proxy_config_k8s_resource_location) @@ -122,6 +125,10 @@ pub(crate) async fn fix_original_additional_trust_bundle(etcd_client: &InMemoryK .as_str() .context("trustedCA not a string")?; + if trusted_ca_configmap_name.is_empty() { + return Ok(None); + } + let ca_configmap_k8s_resource_location = K8sResourceLocation::new(Some("openshift-config"), "ConfigMap", trusted_ca_configmap_name, "v1"); @@ -142,11 +149,13 @@ pub(crate) async fn fix_original_additional_trust_bundle(etcd_client: &InMemoryK put_etcd_yaml(etcd_client, &ca_configmap_k8s_resource_location, configmap).await?; - Ok(original_additional_trust_bundle - .context("no ca-bundle.crt in trustedCA configmap")? - .as_str() - .context("ca-bundle.crt not a string")? - .to_string()) + Ok(Some( + original_additional_trust_bundle + .context("no ca-bundle.crt in trustedCA configmap")? + .as_str() + .context("ca-bundle.crt not a string")? 
+ .to_string(), + )) } pub(crate) async fn fix_monitoring_configmaps(etcd_client: &InMemoryK8sEtcd, new_merged_bundle: &str) -> Result<()> { diff --git a/src/ocp_postprocess/proxy_rename.rs b/src/ocp_postprocess/proxy_rename.rs new file mode 100644 index 00000000..38729845 --- /dev/null +++ b/src/ocp_postprocess/proxy_rename.rs @@ -0,0 +1,94 @@ +pub(crate) mod args; + +use crate::{config::path::ConfigPath, k8s_etcd::InMemoryK8sEtcd}; +use anyhow::{Context, Result}; +use std::{path::Path, sync::Arc}; + +use self::args::Proxy; + +mod etcd_rename; +mod filesystem_rename; +mod utils; + +pub(crate) async fn rename_all( + etcd_client: &Arc, + proxy: &Proxy, + static_dirs: &[ConfigPath], + static_files: &[ConfigPath], +) -> Result<(), anyhow::Error> { + fix_etcd_resources(etcd_client, proxy).await.context("renaming etcd resources")?; + + fix_filesystem_resources(proxy, static_dirs, static_files) + .await + .context("renaming filesystem resources")?; + + Ok(()) +} + +async fn fix_filesystem_resources(proxy: &Proxy, static_dirs: &[ConfigPath], static_files: &[ConfigPath]) -> Result<()> { + for dir in static_dirs { + fix_dir_resources(proxy, dir).await?; + } + + for file in static_files { + fix_file_resources(proxy, file).await?; + } + + Ok(()) +} + +async fn fix_dir_resources(proxy: &Proxy, dir: &Path) -> Result<()> { + filesystem_rename::rename_proxy_env_dir(proxy, dir) + .await + .context("fixing etcd static pods")?; + + filesystem_rename::fix_filesystem_currentconfig(proxy, dir) + .await + .context("renaming currentconfig")?; + + filesystem_rename::fix_pods_yaml(proxy, dir).await.context("renaming pod yaml")?; + + Ok(()) +} + +async fn fix_file_resources(proxy: &Proxy, file: &Path) -> Result<()> { + filesystem_rename::rename_proxy_env_file(proxy, file) + .await + .context("fixing etcd static pods")?; + + Ok(()) +} + +async fn fix_etcd_resources(etcd_client: &Arc, proxy: &Proxy) -> Result<()> { + etcd_rename::fix_machineconfigs(etcd_client, proxy) + .await + .context("fixing machineconfigs")?; + + etcd_rename::fix_proxy(etcd_client, proxy).await.context("fixing proxy")?; + + etcd_rename::fix_storages(etcd_client, proxy).await.context("fixing storages")?; + + etcd_rename::fix_openshiftapiserver(etcd_client, proxy) + .await + .context("fixing openshiftapiserver")?; + + etcd_rename::fix_kubeapiserver(etcd_client, proxy) + .await + .context("fixing kubeapiserver")?; + + etcd_rename::fix_kubecontrollermanager(etcd_client, proxy) + .await + .context("fixing kubecontrolermanager")?; + + etcd_rename::fix_controllerconfigs(etcd_client, proxy) + .await + .context("fixing controllerconfigs")?; + + etcd_rename::fix_containers(etcd_client, proxy).await.context("fixing containers")?; + + etcd_rename::fix_configmap_pods(etcd_client, proxy) + .await + .context("fixing pod configmaps")?; + + Ok(()) +} diff --git a/src/ocp_postprocess/proxy_rename/args.rs b/src/ocp_postprocess/proxy_rename/args.rs new file mode 100644 index 00000000..1f29533e --- /dev/null +++ b/src/ocp_postprocess/proxy_rename/args.rs @@ -0,0 +1,30 @@ +use anyhow::{ensure, Result}; + +#[derive(Clone, serde::Serialize)] +pub(crate) struct Proxy { + pub(crate) http_proxy: String, + pub(crate) https_proxy: String, + pub(crate) no_proxy: String, +} + +impl Proxy { + pub(crate) fn parse(value: &str) -> Result { + let parts = value.split('|').collect::>(); + + ensure!( + parts.len() == 3, + "expected three parts separated by '|' in proxy argument, i.e. 
'||', found {}", + parts.len() + ); + + let http_proxy = parts[0].to_string(); + let https_proxy = parts[1].to_string(); + let no_proxy = parts[2].to_string(); + + Ok(Self { + http_proxy, + https_proxy, + no_proxy, + }) + } +} diff --git a/src/ocp_postprocess/proxy_rename/etcd_rename.rs b/src/ocp_postprocess/proxy_rename/etcd_rename.rs new file mode 100644 index 00000000..48da267f --- /dev/null +++ b/src/ocp_postprocess/proxy_rename/etcd_rename.rs @@ -0,0 +1,388 @@ +use super::{args::Proxy, utils::fix_machineconfig}; +use crate::{ + cluster_crypto::locations::K8sResourceLocation, + k8s_etcd::{get_etcd_json, put_etcd_yaml, InMemoryK8sEtcd}, +}; +use anyhow::{Context, Result}; +use futures_util::future::join_all; +use serde_json::Value; +use std::sync::Arc; + +pub(crate) async fn fix_machineconfigs(etcd_client: &Arc, proxy: &Proxy) -> Result<()> { + join_all( + etcd_client + .list_keys("machineconfiguration.openshift.io/machineconfigs") + .await? + .into_iter() + .map(|key| async move { + let etcd_result = etcd_client + .get(key.clone()) + .await + .with_context(|| format!("getting key {:?}", key))? + .context("key disappeared")?; + let value: Value = serde_yaml::from_slice(etcd_result.value.as_slice()) + .with_context(|| format!("deserializing value of key {:?}", key,))?; + let k8s_resource_location = K8sResourceLocation::try_from(&value)?; + + let mut machineconfig = get_etcd_json(etcd_client, &k8s_resource_location) + .await? + .context("no machineconfig")?; + + fix_machineconfig(&mut machineconfig, proxy).context("fixing machineconfig")?; + + put_etcd_yaml(etcd_client, &k8s_resource_location, machineconfig).await?; + + Ok(()) + }), + ) + .await + .into_iter() + .collect::>>()?; + + Ok(()) +} + +pub(crate) async fn fix_proxy(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + join_all( + etcd_client + .list_keys("config.openshift.io/proxies/cluster") + .await? + .into_iter() + .map(|key| async move { + let etcd_result = etcd_client + .get(key.clone()) + .await + .with_context(|| format!("getting key {:?}", key))? + .context("key disappeared")?; + + let value: Value = serde_yaml::from_slice(etcd_result.value.as_slice()) + .with_context(|| format!("deserializing value of key {:?}", key,))?; + + let k8s_resource_location = K8sResourceLocation::try_from(&value)?; + + let mut cluster_proxy = get_etcd_json(etcd_client, &k8s_resource_location).await?.context("no proxy")?; + + let spec = cluster_proxy + .pointer_mut("/spec") + .context("no /spec")? + .as_object_mut() + .context("spec not an object")?; + + spec.insert("httpProxy".to_string(), Value::String(proxy.http_proxy.clone())); + spec.insert("httpsProxy".to_string(), Value::String(proxy.https_proxy.clone())); + spec.insert("noProxy".to_string(), Value::String(proxy.no_proxy.clone())); + + let status = cluster_proxy + .pointer_mut("/status") + .context("no /status")? 
+ .as_object_mut() + .context("status not an object")?; + + status.insert("httpProxy".to_string(), Value::String(proxy.http_proxy.clone())); + status.insert("httpsProxy".to_string(), Value::String(proxy.https_proxy.clone())); + status.insert("noProxy".to_string(), Value::String(proxy.no_proxy.clone())); + + put_etcd_yaml(etcd_client, &k8s_resource_location, cluster_proxy).await?; + + Ok(()) + }), + ) + .await + .into_iter() + .collect::>>()?; + + Ok(()) +} + +pub(crate) async fn fix_controllerconfigs(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + join_all( + etcd_client + .list_keys("machineconfiguration.openshift.io/controllerconfigs/machine-config-controller") + .await? + .into_iter() + .map(|key| async move { + let etcd_result = etcd_client + .get(key.clone()) + .await + .with_context(|| format!("getting key {:?}", key))? + .context("key disappeared")?; + + let value: Value = serde_yaml::from_slice(etcd_result.value.as_slice()) + .with_context(|| format!("deserializing value of key {:?}", key,))?; + + let k8s_resource_location = K8sResourceLocation::try_from(&value)?; + + let mut cluster_proxy = get_etcd_json(etcd_client, &k8s_resource_location) + .await? + .context("no controllerconfig")?; + + let object_mut = cluster_proxy.pointer_mut("/spec/proxy").context("no /spec/proxy")?.as_object_mut(); + + match object_mut { + None => { + // This is simply null when the proxy is not set + return Ok(()); + } + Some(spec_proxy) => { + spec_proxy.insert("httpProxy".to_string(), Value::String(proxy.http_proxy.clone())); + spec_proxy.insert("httpsProxy".to_string(), Value::String(proxy.https_proxy.clone())); + spec_proxy.insert("noProxy".to_string(), Value::String(proxy.no_proxy.clone())); + + put_etcd_yaml(etcd_client, &k8s_resource_location, cluster_proxy).await?; + } + } + + Ok(()) + }), + ) + .await + .into_iter() + .collect::>>()?; + + Ok(()) +} + +pub(crate) async fn fix_containers(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + join_all( + etcd_client + .list_keys("deployments/") + .await? + .into_iter() + .chain(etcd_client.list_keys("statefulsets/").await?.into_iter()) + .chain(etcd_client.list_keys("daemonsets/").await?.into_iter()) + .chain(etcd_client.list_keys("jobs/").await?.into_iter()) + .chain(etcd_client.list_keys("cronjobs/").await?.into_iter()) + .chain(etcd_client.list_keys("monitoring.coreos.com/alertmanagers/").await?.into_iter()) + .chain(etcd_client.list_keys("monitoring.coreos.com/prometheuses/").await?.into_iter()) + .chain(etcd_client.list_keys("controllerrevisions/").await?.into_iter()) + .map(|key| async move { + let etcd_result = etcd_client + .get(key.clone()) + .await + .with_context(|| format!("getting key {:?}", key))? + .context("key disappeared")?; + + let value: Value = serde_json::from_slice(etcd_result.value.as_slice()) + .with_context(|| format!("deserializing value of key {:?}", key,))?; + + let k8s_resource_location = K8sResourceLocation::try_from(&value)?; + + let mut workload = get_etcd_json(etcd_client, &k8s_resource_location) + .await? 
+ .context(format!("no workload for {:?}", k8s_resource_location.as_etcd_key()))?; + + let kind = &workload.pointer("/kind"); + + let kind = match kind { + Some(kind) => kind, + None => return Ok(()), + } + .as_str() + .context("kind not a string")?; + + let prefix = match kind { + "Deployment" | "DaemonSet" | "StatefulSet" | "Job" | "CronJob" | "ControllerRevision" => "/spec/template/spec", + "Pod" | "Alertmanager" | "Prometheus" => "/spec", + _ => return Ok(()), + }; + + if kind != "ControllerRevision" { + super::utils::fix_containers(&mut workload, proxy, prefix).context("fixing containers")?; + } else { + // ControllerRevision has a special format, it has a field called data, which + // is a JSON array of numbers, which represent bytes. We need to convert this + // array of numbers to a string, then parse it as JSON, then fix the containers + // in the JSON, then convert it back to a string, then convert it back to a JSON + // array of numbers, then put it back in the ControllerRevision. + let workload_data = workload + .pointer_mut("/data") + .context("no /data")? + .as_object_mut() + .context("data not an object")?; + + let data_string = &String::from_utf8( + workload_data + .get("raw") + .context("no data")? + .as_array() + .context("data not an array")? + .iter() + .map(|v| v.as_u64().context("fieldsV1 not a number")) + .collect::>>() + .context("parsing byte array")? + .into_iter() + .map(|v| v as u8) + .collect::>(), + ) + .context("data not utf8")?; + + let mut data_json = serde_json::from_str(data_string).context("parsing data")?; + + super::utils::fix_containers(&mut data_json, proxy, prefix).context("fixing containers")?; + + workload_data.insert( + "raw".to_string(), + serde_json::to_string(&data_json) + .context("serializing data")? + .bytes() + .map(|b| Value::Number(b.into())) + .collect(), + ); + }; + + put_etcd_yaml(etcd_client, &k8s_resource_location, workload).await?; + + Ok(()) + }), + ) + .await + .into_iter() + .collect::>>()?; + + Ok(()) +} + +pub(crate) async fn fix_storages(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + let k8s_resource_location = K8sResourceLocation::new(None, "Storage", "cluster", "operator.openshift.io/v1"); + + let mut storage = get_etcd_json(etcd_client, &k8s_resource_location) + .await? + .context(format!("no {:?}", k8s_resource_location.as_etcd_key()))?; + + let spec = storage + .pointer_mut("/spec/observedConfig/targetconfig/proxy") + .context("no proxy")? + .as_object_mut() + .context("proxy not an object")?; + + generic_proxy_fix(spec, proxy); + + put_etcd_yaml(etcd_client, &k8s_resource_location, storage).await?; + + Ok(()) +} + +fn generic_proxy_fix(spec: &mut serde_json::Map, proxy: &Proxy) { + spec.insert("HTTPS_PROXY".to_string(), Value::String(proxy.http_proxy.clone())); + spec.insert("HTTP_PROXY".to_string(), Value::String(proxy.https_proxy.clone())); + spec.insert("NO_PROXY".to_string(), Value::String(proxy.no_proxy.clone())); +} + +pub(crate) async fn fix_openshiftapiserver(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + let k8s_resource_location = K8sResourceLocation::new(None, "OpenShiftAPIServer", "cluster", "operator.openshift.io/v1"); + + let mut cluster_proxy = get_etcd_json(etcd_client, &k8s_resource_location) + .await? + .context(format!("no {:?}", k8s_resource_location.as_etcd_key()))?; + + let spec = cluster_proxy + .pointer_mut("/spec/observedConfig/workloadcontroller/proxy") + .context("no proxy")? 
+ .as_object_mut() + .context("proxy not an object")?; + + generic_proxy_fix(spec, proxy); + + put_etcd_yaml(etcd_client, &k8s_resource_location, cluster_proxy).await?; + + Ok(()) +} + +pub(crate) async fn fix_kubeapiserver(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + let k8s_resource_location = K8sResourceLocation::new(None, "KubeAPIServer", "cluster", "operator.openshift.io/v1"); + + let mut cluster_proxy = get_etcd_json(etcd_client, &k8s_resource_location) + .await? + .context("no kubeapiserver")?; + + let spec = cluster_proxy + .pointer_mut("/spec/observedConfig/targetconfigcontroller/proxy") + .context("no proxy")? + .as_object_mut() + .context("proxy not an object")?; + + generic_proxy_fix(spec, proxy); + + put_etcd_yaml(etcd_client, &k8s_resource_location, cluster_proxy).await?; + + Ok(()) +} + +pub(crate) async fn fix_kubecontrollermanager(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + let k8s_resource_location = K8sResourceLocation::new(None, "KubeControllerManager", "cluster", "operator.openshift.io/v1"); + + let mut cluster_proxy = get_etcd_json(etcd_client, &k8s_resource_location) + .await? + .context("no kubecontrollermanager")?; + + let spec = cluster_proxy + .pointer_mut("/spec/observedConfig/targetconfigcontroller/proxy") + .context("no proxy")? + .as_object_mut() + .context("proxy not an object")?; + + generic_proxy_fix(spec, proxy); + + put_etcd_yaml(etcd_client, &k8s_resource_location, cluster_proxy).await?; + + Ok(()) +} + +pub(crate) async fn fix_configmap_pods(etcd_client: &InMemoryK8sEtcd, proxy: &Proxy) -> Result<()> { + join_all( + etcd_client + .list_keys("configmaps/openshift-kube-apiserver/kube-apiserver-pod") + .await? + .into_iter() + .chain( + etcd_client + .list_keys("configmaps/openshift-kube-controller-manager/kube-controller-manager-pod") + .await? + .into_iter(), + ) + .map(|key| async move { + let etcd_result = etcd_client + .get(key.clone()) + .await + .with_context(|| format!("getting key {:?}", key))? + .context("key disappeared")?; + + let value: Value = serde_json::from_slice(etcd_result.value.as_slice()) + .with_context(|| format!("deserializing value of key {:?}", key,))?; + + let k8s_resource_location = K8sResourceLocation::try_from(&value)?; + + let mut configmap = get_etcd_json(etcd_client, &k8s_resource_location).await?.context("no configmap")?; + + let data = configmap + .pointer_mut("/data") + .context("no /data")? + .as_object_mut() + .context("data not an object")?; + + let pod_json = data + .get("pod.yaml") + .context("no pod.yaml")? 
+ .as_str() + .context("pod.yaml not a string")?; + + let mut pod = serde_json::from_str(pod_json).context("parsing pod.yaml")?; + + super::utils::fix_containers(&mut pod, proxy, "/spec").context("fixing containers")?; + + data.insert( + "pod.yaml".to_string(), + Value::String(serde_json::to_string(&pod).context("serializing pod")?), + ); + + put_etcd_yaml(etcd_client, &k8s_resource_location, configmap).await?; + + Ok(()) + }), + ) + .await + .into_iter() + .collect::>>()?; + + Ok(()) +} diff --git a/src/ocp_postprocess/proxy_rename/filesystem_rename.rs b/src/ocp_postprocess/proxy_rename/filesystem_rename.rs new file mode 100644 index 00000000..0b7de4d7 --- /dev/null +++ b/src/ocp_postprocess/proxy_rename/filesystem_rename.rs @@ -0,0 +1,130 @@ +use super::{ + args::Proxy, + utils::{self, fix_containers, fix_machineconfig}, +}; +use crate::file_utils::{self, commit_file, read_file_to_string}; +use anyhow::{self, Context, Result}; +use futures_util::future::join_all; +use serde_json::Value; +use std::{collections::HashSet, path::Path}; + +pub(crate) async fn rename_proxy_env_file(proxy: &Proxy, file: &Path) -> Result<()> { + if file + .file_name() + .context("getting file name")? + .to_str() + .context("converting file name to string")? + != "proxy.env" + { + return Ok(()); + } + + rename_proxy_env(file, proxy).await?; + + Ok(()) +} + +async fn rename_proxy_env(file: &Path, proxy: &Proxy) -> Result<(), anyhow::Error> { + commit_file( + file, + utils::rename_proxy_env_file_contents(proxy, read_file_to_string(file).await.context("reading proxy.env")?), + ) + .await + .context("writing proxy.env to disk")?; + Ok(()) +} + +pub(crate) async fn rename_proxy_env_dir(proxy: &Proxy, dir: &Path) -> Result<()> { + join_all( + file_utils::globvec(dir, "**/proxy.env.mcdorig")? + .into_iter() + .collect::>() + .into_iter() + .map(|file_path| { + let proxy = proxy.clone(); + let mcdorig_file_path = file_path.clone(); + tokio::spawn(async move { + async move { + rename_proxy_env(&mcdorig_file_path, &proxy) + .await + .context("renaming proxy.env.mcdorig")?; + + anyhow::Ok(()) + } + .await + .context(format!("fixing kubeconfig {:?}", &file_path)) + }) + }), + ) + .await + .into_iter() + .collect::, _>>()? + .into_iter() + .collect::>>()?; + + Ok(()) +} + +pub(crate) async fn fix_filesystem_currentconfig(proxy: &Proxy, dir: &Path) -> Result<()> { + join_all(file_utils::globvec(dir, "**/currentconfig")?.into_iter().map(|file_path| { + let proxy_path = file_path.clone(); + let proxy = proxy.clone(); + tokio::spawn(async move { + async move { + let contents = read_file_to_string(&file_path) + .await + .context("reading kube-apiserver oauthMetadata")?; + + let mut config: Value = serde_json::from_str(&contents).context("parsing currentconfig")?; + + fix_machineconfig(&mut config, &proxy)?; + + commit_file(file_path, serde_json::to_string(&config).context("serializing currentconfig")?) + .await + .context("writing currentconfig to disk")?; + + anyhow::Ok(()) + } + .await + .context(format!("fixing currentconfig {:?}", proxy_path)) + }) + })) + .await + .into_iter() + .collect::, _>>()? 
+ .into_iter() + .collect::>>()?; + + Ok(()) +} + +pub(crate) async fn fix_pods_yaml(proxy: &Proxy, dir: &Path) -> Result<()> { + join_all(file_utils::globvec(dir, "**/*pod.yaml")?.into_iter().map(|file_path| { + let pod_path = file_path.clone(); + let proxy = proxy.clone(); + tokio::spawn(async move { + async move { + let contents = read_file_to_string(&file_path).await.context("reading pods.yaml")?; + + let mut config: Value = serde_yaml::from_str(&contents).context("parsing pods.yaml")?; + + fix_containers(&mut config, &proxy, "/spec").context("fixing containers")?; + + commit_file(file_path, serde_yaml::to_string(&config).context("serializing pods.yaml")?) + .await + .context("writing pods.yaml to disk")?; + + anyhow::Ok(()) + } + .await + .context(format!("fixing pods.yaml {:?}", pod_path)) + }) + })) + .await + .into_iter() + .collect::, _>>()? + .into_iter() + .collect::>>()?; + + Ok(()) +} diff --git a/src/ocp_postprocess/proxy_rename/utils.rs b/src/ocp_postprocess/proxy_rename/utils.rs new file mode 100644 index 00000000..a6e000b3 --- /dev/null +++ b/src/ocp_postprocess/proxy_rename/utils.rs @@ -0,0 +1,303 @@ +use std::ops::Not; + +use super::args::Proxy; +use crate::file_utils; +use anyhow::{Context, Result}; +use serde_json::Value; + +pub(crate) fn rename_proxy_env_file_contents(proxy: &Proxy, contents: String) -> String { + let mut new_config_lines = vec![]; + + let http_proxy = &format!("HTTP_PROXY={}", proxy.http_proxy); + let https_proxy = &format!("HTTPS_PROXY={}", proxy.https_proxy); + let no_proxy = &format!("NO_PROXY={}", proxy.no_proxy); + + if !proxy.http_proxy.is_empty() { + new_config_lines.push(http_proxy.as_str()); + } + if !proxy.https_proxy.is_empty() { + new_config_lines.push(https_proxy.as_str()); + } + if !proxy.no_proxy.is_empty() { + new_config_lines.push(no_proxy.as_str()); + } + + contents + .lines() + .filter(|line| !line.starts_with("HTTP_PROXY=") && !line.starts_with("HTTPS_PROXY=") && !line.starts_with("NO_PROXY=")) + .chain(new_config_lines) + .collect::>() + .join("\n") +} + +pub(crate) fn fix_machineconfig(machineconfig: &mut Value, proxy: &Proxy) -> Result<()> { + let pointer_mut = machineconfig.pointer_mut("/spec/config/storage/files"); + if pointer_mut.is_none() { + // Not all machineconfigs have files to look at and that's ok + return Ok(()); + }; + + let find_map = pointer_mut + .context("no /spec/config/storage/files")? + .as_array_mut() + .context("files not an array")? + .iter_mut() + .find_map(|file| (file.pointer("/path")? == "/etc/mco/proxy.env").then_some(file)); + + if find_map.is_none() { + // Not all machineconfigs have the file we're looking for and that's ok + return Ok(()); + }; + + let file_contents = find_map + .context("no /etc/mco/proxy.env file in machineconfig")? + .pointer_mut("/contents") + .context("no .contents")? + .as_object_mut() + .context("annotations not an object")?; + + let original_data = file_contents["source"].as_str().context("source not a string")?; + + let (decoded, _fragment) = data_url::DataUrl::process(original_data) + .ok() + .context("dataurl processing")? 
+ .decode_to_vec() + .ok() + .context("dataurl decoding")?; + + let new = rename_proxy_env_file_contents(proxy, String::from_utf8(decoded).context("utf8 decoding")?); + + file_contents.insert("source".to_string(), serde_json::Value::String(file_utils::dataurl_encode(&new))); + + Ok(()) +} + +fn get_http_proxy_var_name(is_upper: bool) -> &'static str { + if is_upper { + "HTTP_PROXY" + } else { + "http_proxy" + } +} + +fn get_https_proxy_var_name(is_upper: bool) -> &'static str { + if is_upper { + "HTTPS_PROXY" + } else { + "https_proxy" + } +} + +fn get_no_proxy_var_name(is_upper: bool) -> &'static str { + if is_upper { + "NO_PROXY" + } else { + "no_proxy" + } +} + +pub(crate) fn fix_containers(config: &mut Value, proxy: &Proxy, prefix: &str) -> Result<()> { + let suffixes = &["containers", "initContainers"]; + let casing_options = [true, false]; + + for (is_uppercase, suffix) in casing_options.into_iter().flat_map(|x| suffixes.iter().map(move |y| (x, y))) { + let pointer_mut = config.pointer_mut(format!("{prefix}/{suffix}").as_str()); + + let containers = match pointer_mut { + Some(containers) => containers, + None => continue, + }; + + for container in containers.as_array_mut().context("containers not an array")? { + let env = container.pointer_mut("/env"); + + let env = match env { + Some(env) => env, + // Not all containers have an env section + None => continue, + }; + + let container_env = env.as_array_mut().context("env not an array")?; + + let desired_proxies = [ + proxy + .http_proxy + .is_empty() + .not() + .then_some((get_http_proxy_var_name(is_uppercase), proxy.http_proxy.as_str())), + proxy + .https_proxy + .is_empty() + .not() + .then_some((get_https_proxy_var_name(is_uppercase), proxy.https_proxy.as_str())), + proxy + .no_proxy + .is_empty() + .not() + .then_some((get_no_proxy_var_name(is_uppercase), proxy.no_proxy.as_str())), + ] + .iter() + .filter_map(|x| *x) + .map(|(k, v)| serde_json::json!({"name": k, "value": v})) + .collect::>(); + + let insertion_index = remove_existing_proxy_env_vars(container_env, is_uppercase)?; + + match insertion_index { + Some(i) => { + container_env.splice(i..i, desired_proxies); + } + None => continue, + } + } + } + + Ok(()) +} + +// Remove all existing proxy env vars from the container's env and return the index of where +// the first proxy env var should be inserted +fn remove_existing_proxy_env_vars(container_env: &mut Vec, is_upper: bool) -> Result> { + let indices_to_remove = container_env + .iter() + .enumerate() + .filter_map(|(i, env)| { + let name = env.pointer("/name").context("no /name").ok()?; + if name == get_http_proxy_var_name(is_upper) + || name == get_https_proxy_var_name(is_upper) + || name == get_no_proxy_var_name(is_upper) + { + Some(i) + } else { + None + } + }) + .collect::>(); + + // Run backwards so we don't have to adjust the indices as we remove elements + for i in indices_to_remove.iter().rev() { + container_env.remove(*i); + } + + if indices_to_remove.is_empty() { + Ok(None) + } else { + Ok(Some(indices_to_remove[0])) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_fix_containers() { + let data = 
r#"{"apiVersion":"apps/v1","kind":"Deployment","spec":{"template":{"spec":{"containers":[{"env":[{"name":"DEFAULT_DESTINATION_CA_PATH","value":"/var/run/configmaps/service-ca/service-ca.crt"},{"name":"HTTP_PROXY","value":"http://squid.corp.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://squid.corp.redhat.com:3128"},{"name":"NO_PROXY","value":".cluster.local,.seed.ibo0.redhat.com,.svc,10.128.0.0/14,127.0.0.1,172.30.0.0/16,192.168.126.0/24,api-int.seed.ibo0.redhat.com,api-int.seed.redhat.com,localhost"},{"name":"RELOAD_INTERVAL","value":"5s"},{"name":"STATS_USERNAME_FILE","value":"/var/lib/haproxy/conf/metrics-auth/statsUsername"},{"name":"http_proxy","value":"http://squid.corp.redhat.com:3128"},{"name":"https_proxy","value":"http://squid.corp.redhat.com:3128"},{"name":"no_proxy","value":".cluster.local,.seed.ibo0.redhat.com,.svc,10.128.0.0/14,127.0.0.1,172.30.0.0/16,192.168.126.0/24,api-int.seed.ibo0.redhat.com,api-int.seed.redhat.com,localhost"}],"name":"router"}]}}}}"#; + + let mut config: Value = serde_json::from_str(data).unwrap(); + + let proxy = Proxy { + http_proxy: "http://proxy.example.com".to_string(), + https_proxy: "http://proxy.example.com".to_string(), + no_proxy: "localhost".to_string(), + }; + + fix_containers(&mut config, &proxy, "/spec/template/spec").unwrap(); + + assert!(!serde_json::to_string(&config).unwrap().contains("squid.corp.redhat.com")); + } + + #[test] + fn test_remove_existing_proxy_env_vars() { + let mut env = vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "HTTP_PROXY", "value": "http://proxy.example.com"}), + json!({"name": "HTTPS_PROXY", "value": "http://proxy.example.com"}), + json!({"name": "NO_PROXY", "value": "localhost"}), + json!({"name": "OTHER", "value": "value"}), + ]; + + let insertion_index = remove_existing_proxy_env_vars(&mut env, true).unwrap(); + + assert_eq!( + env, + vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "OTHER", "value": "value"}) + ] + ); + assert_eq!(insertion_index, Some(1)); + + //////////////////////////////////////////////////// + + let mut env = vec![ + json!({"name": "HTTP_PROXY", "value": "http://proxy.example.com"}), + json!({"name": "HTTPS_PROXY", "value": "http://proxy.example.com"}), + json!({"name": "NO_PROXY", "value": "localhost"}), + ]; + + let insertion_index = remove_existing_proxy_env_vars(&mut env, true).unwrap(); + + assert!(env.is_empty()); + assert_eq!(insertion_index, Some(0)); + + //////////////////////////////////////////////////// + + let mut env = vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "HTTPS_PROXY", "value": "http://proxy.example.com"}), + json!({"name": "OTHER", "value": "value"}), + ]; + + let insertion_index = remove_existing_proxy_env_vars(&mut env, true).unwrap(); + + assert_eq!( + env, + vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "OTHER", "value": "value"}) + ] + ); + assert_eq!(insertion_index, Some(1)); + + //////////////////////////////////////////////////// + + let mut env = vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "https_proxy", "value": "http://proxy.example.com"}), + json!({"name": "OTHER", "value": "value"}), + ]; + + let insertion_index = remove_existing_proxy_env_vars(&mut env, false).unwrap(); + + assert_eq!( + env, + vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "OTHER", "value": "value"}) + ] + ); + assert_eq!(insertion_index, Some(1)); + + //////////////////////////////////////////////////// + + let mut env = vec![ + json!({"name": 
"SOME", "value": "value"}), + json!({"name": "https_proxy", "value": "http://proxy.example.com"}), + json!({"name": "OTHER", "value": "value"}), + ]; + + let insertion_index = remove_existing_proxy_env_vars(&mut env, true).unwrap(); + + assert_eq!( + env, + vec![ + json!({"name": "SOME", "value": "value"}), + json!({"name": "https_proxy", "value": "http://proxy.example.com"}), + json!({"name": "OTHER", "value": "value"}) + ] + ); + assert_eq!(insertion_index, None); + } +} diff --git a/src/protobuf_gen/k8s.io.api.batch.v1.rs b/src/protobuf_gen/k8s.io.api.batch.v1.rs new file mode 100644 index 00000000..635f03c5 --- /dev/null +++ b/src/protobuf_gen/k8s.io.api.batch.v1.rs @@ -0,0 +1,605 @@ +/// CronJob represents the configuration of a single cron job. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CronJob { + /// Standard object's metadata. + /// More info: + /// +optional + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::ObjectMeta, + >, + /// Specification of the desired behavior of a cron job, including the schedule. + /// More info: + /// +optional + #[prost(message, optional, tag = "2")] + pub spec: ::core::option::Option, + /// Current status of a cron job. + /// More info: + /// +optional + #[prost(message, optional, tag = "3")] + pub status: ::core::option::Option, +} +/// CronJobList is a collection of cron jobs. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CronJobList { + /// Standard list metadata. + /// More info: + /// +optional + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::ListMeta, + >, + /// items is the list of CronJobs. + #[prost(message, repeated, tag = "2")] + pub items: ::prost::alloc::vec::Vec, +} +/// CronJobSpec describes how the job execution will look like and when it will actually run. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CronJobSpec { + /// The schedule in Cron format, see + #[prost(string, optional, tag = "1")] + pub schedule: ::core::option::Option<::prost::alloc::string::String>, + /// The time zone name for the given schedule, see + /// If not specified, this will default to the time zone of the kube-controller-manager process. + /// The set of valid time zone names and the time zone offset is loaded from the system-wide time zone + /// database by the API server during CronJob validation and the controller manager during execution. + /// If no system-wide time zone database can be found a bundled version of the database is used instead. + /// If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host + /// configuration, the controller will stop creating new new Jobs and will create a system event with the + /// reason UnknownTimeZone. 
+ /// More information can be found in + /// +optional + #[prost(string, optional, tag = "8")] + pub time_zone: ::core::option::Option<::prost::alloc::string::String>, + /// Optional deadline in seconds for starting the job if it misses scheduled + /// time for any reason. Missed jobs executions will be counted as failed ones. + /// +optional + #[prost(int64, optional, tag = "2")] + pub starting_deadline_seconds: ::core::option::Option, + /// Specifies how to treat concurrent executions of a Job. + /// Valid values are: + /// + /// - "Allow" (default): allows CronJobs to run concurrently; + /// - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + /// - "Replace": cancels currently running job and replaces it with a new one + /// +optional + #[prost(string, optional, tag = "3")] + pub concurrency_policy: ::core::option::Option<::prost::alloc::string::String>, + /// This flag tells the controller to suspend subsequent executions, it does + /// not apply to already started executions. Defaults to false. + /// +optional + #[prost(bool, optional, tag = "4")] + pub suspend: ::core::option::Option, + /// Specifies the job that will be created when executing a CronJob. + #[prost(message, optional, tag = "5")] + pub job_template: ::core::option::Option, + /// The number of successful finished jobs to retain. Value must be non-negative integer. + /// Defaults to 3. + /// +optional + #[prost(int32, optional, tag = "6")] + pub successful_jobs_history_limit: ::core::option::Option, + /// The number of failed finished jobs to retain. Value must be non-negative integer. + /// Defaults to 1. + /// +optional + #[prost(int32, optional, tag = "7")] + pub failed_jobs_history_limit: ::core::option::Option, +} +/// CronJobStatus represents the current state of a cron job. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CronJobStatus { + /// A list of pointers to currently running jobs. + /// +optional + /// +listType=atomic + #[prost(message, repeated, tag = "1")] + pub active: ::prost::alloc::vec::Vec, + /// Information when was the last time the job was successfully scheduled. + /// +optional + #[prost(message, optional, tag = "4")] + pub last_schedule_time: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::Time, + >, + /// Information when was the last time the job successfully completed. + /// +optional + #[prost(message, optional, tag = "5")] + pub last_successful_time: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::Time, + >, +} +/// Job represents the configuration of a single job. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Job { + /// Standard object's metadata. + /// More info: + /// +optional + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::ObjectMeta, + >, + /// Specification of the desired behavior of a job. + /// More info: + /// +optional + #[prost(message, optional, tag = "2")] + pub spec: ::core::option::Option, + /// Current status of a job. + /// More info: + /// +optional + #[prost(message, optional, tag = "3")] + pub status: ::core::option::Option, +} +/// JobCondition describes current state of a job. 
+#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JobCondition { + /// Type of job condition, Complete or Failed. + #[prost(string, optional, tag = "1")] + pub r#type: ::core::option::Option<::prost::alloc::string::String>, + /// Status of the condition, one of True, False, Unknown. + #[prost(string, optional, tag = "2")] + pub status: ::core::option::Option<::prost::alloc::string::String>, + /// Last time the condition was checked. + /// +optional + #[prost(message, optional, tag = "3")] + pub last_probe_time: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::Time, + >, + /// Last time the condition transit from one status to another. + /// +optional + #[prost(message, optional, tag = "4")] + pub last_transition_time: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::Time, + >, + /// (brief) reason for the condition's last transition. + /// +optional + #[prost(string, optional, tag = "5")] + pub reason: ::core::option::Option<::prost::alloc::string::String>, + /// Human readable message indicating details about last transition. + /// +optional + #[prost(string, optional, tag = "6")] + pub message: ::core::option::Option<::prost::alloc::string::String>, +} +/// JobList is a collection of jobs. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JobList { + /// Standard list metadata. + /// More info: + /// +optional + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::ListMeta, + >, + /// items is the list of Jobs. + #[prost(message, repeated, tag = "2")] + pub items: ::prost::alloc::vec::Vec, +} +/// JobSpec describes how the job execution will look like. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JobSpec { + /// Specifies the maximum desired number of pods the job should + /// run at any given time. The actual number of pods running in steady state will + /// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + /// i.e. when the work left to do is less than max parallelism. + /// More info: + /// +optional + #[prost(int32, optional, tag = "1")] + pub parallelism: ::core::option::Option, + /// Specifies the desired number of successfully finished pods the + /// job should be run with. Setting to null means that the success of any + /// pod signals the success of all pods, and allows parallelism to have any positive + /// value. Setting to 1 means that parallelism is limited to 1 and the success of that + /// pod signals the success of the job. + /// More info: + /// +optional + #[prost(int32, optional, tag = "2")] + pub completions: ::core::option::Option, + /// Specifies the duration in seconds relative to the startTime that the job + /// may be continuously active before the system tries to terminate it; value + /// must be positive integer. If a Job is suspended (at creation or through an + /// update), this timer will effectively be stopped and reset when the Job is + /// resumed again. 
+ /// +optional + #[prost(int64, optional, tag = "3")] + pub active_deadline_seconds: ::core::option::Option, + /// Specifies the policy of handling failed pods. In particular, it allows to + /// specify the set of actions and conditions which need to be + /// satisfied to take the associated action. + /// If empty, the default behaviour applies - the counter of failed pods, + /// represented by the jobs's .status.failed field, is incremented and it is + /// checked against the backoffLimit. This field cannot be used in combination + /// with restartPolicy=OnFailure. + /// + /// This field is beta-level. It can be used when the `JobPodFailurePolicy` + /// feature gate is enabled (enabled by default). + /// +optional + #[prost(message, optional, tag = "11")] + pub pod_failure_policy: ::core::option::Option, + /// Specifies the number of retries before marking this job failed. + /// Defaults to 6 + /// +optional + #[prost(int32, optional, tag = "7")] + pub backoff_limit: ::core::option::Option, + /// Specifies the limit for the number of retries within an + /// index before marking this index as failed. When enabled the number of + /// failures per index is kept in the pod's + /// batch.kubernetes.io/job-index-failure-count annotation. It can only + /// be set when Job's completionMode=Indexed, and the Pod's restart + /// policy is Never. The field is immutable. + /// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + /// feature gate is enabled (disabled by default). + /// +optional + #[prost(int32, optional, tag = "12")] + pub backoff_limit_per_index: ::core::option::Option, + /// Specifies the maximal number of failed indexes before marking the Job as + /// failed, when backoffLimitPerIndex is set. Once the number of failed + /// indexes exceeds this number the entire Job is marked as Failed and its + /// execution is terminated. When left as null the job continues execution of + /// all of its indexes and is marked with the `Complete` Job condition. + /// It can only be specified when backoffLimitPerIndex is set. + /// It can be null or up to completions. It is required and must be + /// less than or equal to 10^4 when is completions greater than 10^5. + /// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + /// feature gate is enabled (disabled by default). + /// +optional + #[prost(int32, optional, tag = "13")] + pub max_failed_indexes: ::core::option::Option, + /// A label query over pods that should match the pod count. + /// Normally, the system sets this field for you. + /// More info: + /// +optional + #[prost(message, optional, tag = "4")] + pub selector: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::LabelSelector, + >, + /// manualSelector controls generation of pod labels and pod selectors. + /// Leave `manualSelector` unset unless you are certain what you are doing. + /// When false or unset, the system pick labels unique to this job + /// and appends those labels to the pod template. When true, + /// the user is responsible for picking unique labels and specifying + /// the selector. Failure to pick a unique label may cause this + /// and other jobs to not function correctly. However, You may see + /// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + /// API. + /// More info: + /// +optional + #[prost(bool, optional, tag = "5")] + pub manual_selector: ::core::option::Option, + /// Describes the pod that will be created when executing a job. 
+ /// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". + /// More info: + #[prost(message, optional, tag = "6")] + pub template: ::core::option::Option, + /// ttlSecondsAfterFinished limits the lifetime of a Job that has finished + /// execution (either Complete or Failed). If this field is set, + /// ttlSecondsAfterFinished after the Job finishes, it is eligible to be + /// automatically deleted. When the Job is being deleted, its lifecycle + /// guarantees (e.g. finalizers) will be honored. If this field is unset, + /// the Job won't be automatically deleted. If this field is set to zero, + /// the Job becomes eligible to be deleted immediately after it finishes. + /// +optional + #[prost(int32, optional, tag = "8")] + pub ttl_seconds_after_finished: ::core::option::Option, + /// completionMode specifies how Pod completions are tracked. It can be + /// `NonIndexed` (default) or `Indexed`. + /// + /// `NonIndexed` means that the Job is considered complete when there have + /// been .spec.completions successfully completed Pods. Each Pod completion is + /// homologous to each other. + /// + /// `Indexed` means that the Pods of a + /// Job get an associated completion index from 0 to (.spec.completions - 1), + /// available in the annotation batch.kubernetes.io/job-completion-index. + /// The Job is considered complete when there is one successfully completed Pod + /// for each index. + /// When value is `Indexed`, .spec.completions must be specified and + /// `.spec.parallelism` must be less than or equal to 10^5. + /// In addition, The Pod name takes the form + /// `$(job-name)-$(index)-$(random-string)`, + /// the Pod hostname takes the form `$(job-name)-$(index)`. + /// + /// More completion modes can be added in the future. + /// If the Job controller observes a mode that it doesn't recognize, which + /// is possible during upgrades due to version skew, the controller + /// skips updates for the Job. + /// +optional + #[prost(string, optional, tag = "9")] + pub completion_mode: ::core::option::Option<::prost::alloc::string::String>, + /// suspend specifies whether the Job controller should create Pods or not. If + /// a Job is created with suspend set to true, no Pods are created by the Job + /// controller. If a Job is suspended after creation (i.e. the flag goes from + /// false to true), the Job controller will delete all active Pods associated + /// with this Job. Users must design their workload to gracefully handle this. + /// Suspending a Job will reset the StartTime field of the Job, effectively + /// resetting the ActiveDeadlineSeconds timer too. Defaults to false. + /// + /// +optional + #[prost(bool, optional, tag = "10")] + pub suspend: ::core::option::Option, + /// podReplacementPolicy specifies when to create replacement Pods. + /// Possible values are: + /// - TerminatingOrFailed means that we recreate pods + /// when they are terminating (has a metadata.deletionTimestamp) or failed. + /// - Failed means to wait until a previously created Pod is fully terminated (has phase + /// Failed or Succeeded) before creating a replacement Pod. + /// + /// When using podFailurePolicy, Failed is the the only allowed value. + /// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. + /// This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. 
+ /// +optional + #[prost(string, optional, tag = "14")] + pub pod_replacement_policy: ::core::option::Option<::prost::alloc::string::String>, +} +/// JobStatus represents the current state of a Job. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JobStatus { + /// The latest available observations of an object's current state. When a Job + /// fails, one of the conditions will have type "Failed" and status true. When + /// a Job is suspended, one of the conditions will have type "Suspended" and + /// status true; when the Job is resumed, the status of this condition will + /// become false. When a Job is completed, one of the conditions will have + /// type "Complete" and status true. + /// More info: + /// +optional + /// +patchMergeKey=type + /// +patchStrategy=merge + /// +listType=atomic + #[prost(message, repeated, tag = "1")] + pub conditions: ::prost::alloc::vec::Vec, + /// Represents time when the job controller started processing a job. When a + /// Job is created in the suspended state, this field is not set until the + /// first time it is resumed. This field is reset every time a Job is resumed + /// from suspension. It is represented in RFC3339 form and is in UTC. + /// +optional + #[prost(message, optional, tag = "2")] + pub start_time: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::Time, + >, + /// Represents time when the job was completed. It is not guaranteed to + /// be set in happens-before order across separate operations. + /// It is represented in RFC3339 form and is in UTC. + /// The completion time is only set when the job finishes successfully. + /// +optional + #[prost(message, optional, tag = "3")] + pub completion_time: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::Time, + >, + /// The number of pending and running pods. + /// +optional + #[prost(int32, optional, tag = "4")] + pub active: ::core::option::Option, + /// The number of pods which reached phase Succeeded. + /// +optional + #[prost(int32, optional, tag = "5")] + pub succeeded: ::core::option::Option, + /// The number of pods which reached phase Failed. + /// +optional + #[prost(int32, optional, tag = "6")] + pub failed: ::core::option::Option, + /// The number of pods which are terminating (in phase Pending or Running + /// and have a deletionTimestamp). + /// + /// This field is alpha-level. The job controller populates the field when + /// the feature gate JobPodReplacementPolicy is enabled (disabled by default). + /// +optional + #[prost(int32, optional, tag = "11")] + pub terminating: ::core::option::Option, + /// completedIndexes holds the completed indexes when .spec.completionMode = + /// "Indexed" in a text format. The indexes are represented as decimal integers + /// separated by commas. The numbers are listed in increasing order. Three or + /// more consecutive numbers are compressed and represented by the first and + /// last element of the series, separated by a hyphen. + /// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are + /// represented as "1,3-5,7". + /// +optional + #[prost(string, optional, tag = "7")] + pub completed_indexes: ::core::option::Option<::prost::alloc::string::String>, + /// FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. 
+ /// The indexes are represented in the text format analogous as for the + /// `completedIndexes` field, ie. they are kept as decimal integers + /// separated by commas. The numbers are listed in increasing order. Three or + /// more consecutive numbers are compressed and represented by the first and + /// last element of the series, separated by a hyphen. + /// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are + /// represented as "1,3-5,7". + /// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + /// feature gate is enabled (disabled by default). + /// +optional + #[prost(string, optional, tag = "10")] + pub failed_indexes: ::core::option::Option<::prost::alloc::string::String>, + /// uncountedTerminatedPods holds the UIDs of Pods that have terminated but + /// the job controller hasn't yet accounted for in the status counters. + /// + /// The job controller creates pods with a finalizer. When a pod terminates + /// (succeeded or failed), the controller does three steps to account for it + /// in the job status: + /// + /// 1. Add the pod UID to the arrays in this field. + /// 2. Remove the pod finalizer. + /// 3. Remove the pod UID from the arrays while increasing the corresponding + /// counter. + /// + /// Old jobs might not be tracked using this field, in which case the field + /// remains null. + /// +optional + #[prost(message, optional, tag = "8")] + pub uncounted_terminated_pods: ::core::option::Option, + /// The number of pods which have a Ready condition. + /// + /// This field is beta-level. The job controller populates the field when + /// the feature gate JobReadyPods is enabled (enabled by default). + /// +optional + #[prost(int32, optional, tag = "9")] + pub ready: ::core::option::Option, +} +/// JobTemplateSpec describes the data a Job should have when created from a template +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JobTemplateSpec { + /// Standard object's metadata of the jobs created from this template. + /// More info: + /// +optional + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option< + super::super::super::apimachinery::pkg::apis::meta::v1::ObjectMeta, + >, + /// Specification of the desired behavior of the job. + /// More info: + /// +optional + #[prost(message, optional, tag = "2")] + pub spec: ::core::option::Option, +} +/// PodFailurePolicy describes how failed pods influence the backoffLimit. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PodFailurePolicy { + /// A list of pod failure policy rules. The rules are evaluated in order. + /// Once a rule matches a Pod failure, the remaining of the rules are ignored. + /// When no rule matches the Pod failure, the default handling applies - the + /// counter of pod failures is incremented and it is checked against + /// the backoffLimit. At most 20 elements are allowed. + /// +listType=atomic + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, +} +/// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling +/// a failed pod based on its container exit codes. 
In particular, it lookups the +/// .state.terminated.exitCode for each app container and init container status, +/// represented by the .status.containerStatuses and .status.initContainerStatuses +/// fields in the Pod status, respectively. Containers completed with success +/// (exit code 0) are excluded from the requirement check. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PodFailurePolicyOnExitCodesRequirement { + /// Restricts the check for exit codes to the container with the + /// specified name. When null, the rule applies to all containers. + /// When specified, it should match one the container or initContainer + /// names in the pod template. + /// +optional + #[prost(string, optional, tag = "1")] + pub container_name: ::core::option::Option<::prost::alloc::string::String>, + /// Represents the relationship between the container exit code(s) and the + /// specified values. Containers completed with success (exit code 0) are + /// excluded from the requirement check. Possible values are: + /// + /// - In: the requirement is satisfied if at least one container exit code + /// (might be multiple if there are multiple containers not restricted + /// by the 'containerName' field) is in the set of specified values. + /// - NotIn: the requirement is satisfied if at least one container exit code + /// (might be multiple if there are multiple containers not restricted + /// by the 'containerName' field) is not in the set of specified values. + /// Additional values are considered to be added in the future. Clients should + /// react to an unknown operator by assuming the requirement is not satisfied. + #[prost(string, optional, tag = "2")] + pub operator: ::core::option::Option<::prost::alloc::string::String>, + /// Specifies the set of values. Each returned container exit code (might be + /// multiple in case of multiple containers) is checked against this set of + /// values with respect to the operator. The list of values must be ordered + /// and must not contain duplicates. Value '0' cannot be used for the In operator. + /// At least one element is required. At most 255 elements are allowed. + /// +listType=set + #[prost(int32, repeated, packed = "false", tag = "3")] + pub values: ::prost::alloc::vec::Vec, +} +/// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching +/// an actual pod condition type. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PodFailurePolicyOnPodConditionsPattern { + /// Specifies the required Pod condition type. To match a pod condition + /// it is required that specified type equals the pod condition type. + #[prost(string, optional, tag = "1")] + pub r#type: ::core::option::Option<::prost::alloc::string::String>, + /// Specifies the required Pod condition status. To match a pod condition + /// it is required that the specified status equals the pod condition status. + /// Defaults to True. + #[prost(string, optional, tag = "2")] + pub status: ::core::option::Option<::prost::alloc::string::String>, +} +/// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. +/// One of onExitCodes and onPodConditions, but not both, can be used in each rule. 
+#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PodFailurePolicyRule { + /// Specifies the action taken on a pod failure when the requirements are satisfied. + /// Possible values are: + /// + /// - FailJob: indicates that the pod's job is marked as Failed and all + /// running pods are terminated. + /// - FailIndex: indicates that the pod's index is marked as Failed and will + /// not be restarted. + /// This value is alpha-level. It can be used when the + /// `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). + /// - Ignore: indicates that the counter towards the .backoffLimit is not + /// incremented and a replacement pod is created. + /// - Count: indicates that the pod is handled in the default way - the + /// counter towards the .backoffLimit is incremented. + /// Additional values are considered to be added in the future. Clients should + /// react to an unknown action by skipping the rule. + #[prost(string, optional, tag = "1")] + pub action: ::core::option::Option<::prost::alloc::string::String>, + /// Represents the requirement on the container exit codes. + /// +optional + #[prost(message, optional, tag = "2")] + pub on_exit_codes: ::core::option::Option, + /// Represents the requirement on the pod conditions. The requirement is represented + /// as a list of pod condition patterns. The requirement is satisfied if at + /// least one pattern matches an actual pod condition. At most 20 elements are allowed. + /// +listType=atomic + /// +optional + #[prost(message, repeated, tag = "3")] + pub on_pod_conditions: ::prost::alloc::vec::Vec< + PodFailurePolicyOnPodConditionsPattern, + >, +} +/// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't +/// been accounted in Job status counters. +#[derive(serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UncountedTerminatedPods { + /// succeeded holds UIDs of succeeded Pods. + /// +listType=set + /// +optional + #[prost(string, repeated, tag = "1")] + pub succeeded: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// failed holds UIDs of failed Pods. + /// +listType=set + /// +optional + #[prost(string, repeated, tag = "2")] + pub failed: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} diff --git a/src/protobuf_gen/protobufs.rs b/src/protobuf_gen/protobufs.rs index 5539d9a2..480f2021 100644 --- a/src/protobuf_gen/protobufs.rs +++ b/src/protobuf_gen/protobufs.rs @@ -29,6 +29,11 @@ pub mod k8s { include!("k8s.io.api.apps.v1.rs"); } } + pub mod batch { + pub mod v1 { + include!("k8s.io.api.batch.v1.rs"); + } + } pub mod core { pub mod v1 { include!("k8s.io.api.core.v1.rs");
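Note for reviewers: the structs in the generated `k8s.io.api.batch.v1.rs` file above implement `prost::Message`, so batch/v1 objects (Job, CronJob) can be round-tripped through their protobuf wire form once the module is pulled in via the new `include!` in `protobufs.rs`. Below is a minimal sketch of that, assuming the generated module ends up addressable as `protobuf_gen::protobufs::k8s::api::batch::v1` (the exact nesting depends on the surrounding `pub mod` layout in `protobufs.rs`); it is illustrative only, not part of the patch.

```rust
// Sketch only: the module path below is an assumption based on the
// `include!("k8s.io.api.batch.v1.rs")` hunk; adjust to the crate's layout.
use prost::Message;
use crate::protobuf_gen::protobufs::k8s::api::batch::v1::{Job, JobSpec};

fn job_roundtrip_example() -> Result<(), prost::DecodeError> {
    // Build a Job with only the fields this sketch needs; every field in the
    // generated types is optional and defaults to None/empty.
    let job = Job {
        spec: Some(JobSpec {
            parallelism: Some(1),
            completions: Some(1),
            suspend: Some(false),
            ..Default::default()
        }),
        ..Default::default()
    };

    // prost::Message provides encode_to_vec()/decode() for the wire format.
    let bytes = job.encode_to_vec();
    let decoded = Job::decode(bytes.as_slice())?;
    assert_eq!(job, decoded);
    Ok(())
}
```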
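The generated types also carry `serde` derives with `rename_all = "camelCase"`, so their JSON form uses the Kubernetes API field names rather than the Rust snake_case names. A small illustrative sketch under the same module-path assumption as above:

```rust
// Sketch only: demonstrates the serde attributes emitted on the generated
// types; the module path is assumed as in the previous example.
use crate::protobuf_gen::protobufs::k8s::api::batch::v1::CronJobSpec;

fn cronjob_spec_json_example() -> Result<(), serde_json::Error> {
    let spec = CronJobSpec {
        schedule: Some("*/5 * * * *".to_string()),
        concurrency_policy: Some("Forbid".to_string()),
        successful_jobs_history_limit: Some(3),
        ..Default::default()
    };

    // rename_all = "camelCase" makes the JSON keys line up with the
    // Kubernetes API field names (e.g. successfulJobsHistoryLimit).
    let json = serde_json::to_string(&spec)?;
    assert!(json.contains("successfulJobsHistoryLimit"));

    let back: CronJobSpec = serde_json::from_str(&json)?;
    assert_eq!(spec, back);
    Ok(())
}
```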