From e01d4ba51d69be0bcf70f491dab3fa0019e91443 Mon Sep 17 00:00:00 2001 From: naoki-take Date: Tue, 24 Sep 2024 08:22:54 +0000 Subject: [PATCH] Revert "Cilium 1.14.14 Update" This reverts commit 1bce782d11a451240e461b4f2f7772c14235c257 and 9a146d2b9ad814105a8ef364b2ed76fa22c9e755. Signed-off-by: naoki-take --- Makefile | 2 +- artifacts.go | 8 +- artifacts_ignore.yaml | 8 + cilium/base/aggregate_cluster_role.yaml | 3 - cilium/pre/kustomization.yaml | 9 - cilium/pre/upstream.yaml | 307 +++++++----------------- cilium/pre/values.yaml | 8 +- cilium/prod/kustomization.yaml | 9 - cilium/prod/upstream.yaml | 307 +++++++----------------- cilium/prod/values.yaml | 8 +- etc/cilium-pre.yaml | 199 ++++----------- etc/cilium.yaml | 199 ++++----------- 12 files changed, 295 insertions(+), 772 deletions(-) diff --git a/Makefile b/Makefile index db0fd9954..9994d48e2 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,7 @@ update-coil: update-cilium: helm rm -rf /tmp/work-cilium mkdir -p /tmp/work-cilium - git clone --depth 1 --branch v$(shell echo $(CILIUM_TAG) | cut -d \. -f 1,2,3) https://github.com/cilium/cilium /tmp/work-cilium + git clone --depth 1 --branch v$(shell echo $(CILIUM_TAG) | cut -d \. -f 1,2,3)-lb-dsr-patch https://github.com/cybozu-go/cilium /tmp/work-cilium cd /tmp/work-cilium $(HELM) template /tmp/work-cilium/install/kubernetes/cilium/ \ --namespace=kube-system \ diff --git a/artifacts.go b/artifacts.go index 29f58cb19..6f9f6efcc 100644 --- a/artifacts.go +++ b/artifacts.go @@ -16,10 +16,10 @@ var CurrentArtifacts = ArtifactSet{ {Name: "squid", Repository: "ghcr.io/cybozu/squid", Tag: "6.10.0.1", Private: false}, {Name: "squid-exporter", Repository: "ghcr.io/cybozu/squid-exporter", Tag: "1.0.6", Private: false}, {Name: "vault", Repository: "ghcr.io/cybozu/vault", Tag: "1.17.5.1", Private: false}, - {Name: "cilium", Repository: "ghcr.io/cybozu/cilium", Tag: "1.14.14.1", Private: false}, - {Name: "cilium-operator-generic", Repository: "ghcr.io/cybozu/cilium-operator-generic", Tag: "1.14.14.1", Private: false}, - {Name: "hubble-relay", Repository: "ghcr.io/cybozu/hubble-relay", Tag: "1.14.14.1", Private: false}, - {Name: "cilium-certgen", Repository: "ghcr.io/cybozu/cilium-certgen", Tag: "0.1.14.1", Private: false}, + {Name: "cilium", Repository: "ghcr.io/cybozu/cilium", Tag: "1.13.16.4", Private: false}, + {Name: "cilium-operator-generic", Repository: "ghcr.io/cybozu/cilium-operator-generic", Tag: "1.13.16.1", Private: false}, + {Name: "hubble-relay", Repository: "ghcr.io/cybozu/hubble-relay", Tag: "1.13.16.1", Private: false}, + {Name: "cilium-certgen", Repository: "ghcr.io/cybozu/cilium-certgen", Tag: "0.1.11.1", Private: false}, }, Debs: []DebianPackage{ {Name: "etcdpasswd", Owner: "cybozu-go", Repository: "etcdpasswd", Release: "v1.4.8"}, diff --git a/artifacts_ignore.yaml b/artifacts_ignore.yaml index 24a5becc2..b1631c73a 100644 --- a/artifacts_ignore.yaml +++ b/artifacts_ignore.yaml @@ -1,4 +1,12 @@ images: +- repository: ghcr.io/cybozu/cilium + versions: ["1.14.13.1", "1.14.13.2", "1.14.14.1"] +- repository: ghcr.io/cybozu/cilium-operator-generic + versions: ["1.14.13.1", "1.14.14.1"] +- repository: ghcr.io/cybozu/hubble-relay + versions: ["1.14.13.1", "1.14.13.2", "1.14.14.1"] +- repository: ghcr.io/cybozu/cilium-certgen + versions: ["0.1.14.1"] - repository: ghcr.io/cybozu/etcd versions: ["3.5.15.1"] osImage: diff --git a/cilium/base/aggregate_cluster_role.yaml b/cilium/base/aggregate_cluster_role.yaml index 9b1bc0b8f..5bfd4a6d1 100644 --- a/cilium/base/aggregate_cluster_role.yaml 
+++ b/cilium/base/aggregate_cluster_role.yaml @@ -50,9 +50,6 @@ rules: - ciliumexternalworkloads - ciliumexternalworkloads/finalizers - ciliumexternalworkloads/status - - ciliumcidrgroups - - ciliumcidrgroups/finalizers - - ciliumcidrgroups/status verbs: - "get" - "list" diff --git a/cilium/pre/kustomization.yaml b/cilium/pre/kustomization.yaml index a725da19a..8bb23dac1 100644 --- a/cilium/pre/kustomization.yaml +++ b/cilium/pre/kustomization.yaml @@ -33,15 +33,6 @@ patches: patch: |- - op: remove path: /spec/ttlSecondsAfterFinished - - target: - group: apps - version: v1 - kind: DaemonSet - name: cilium - patch: |- - - op: replace - path: /spec/updateStrategy/rollingUpdate/maxUnavailable - value: 1 configMapGenerator: - name: cilium-config namespace: kube-system diff --git a/cilium/pre/upstream.yaml b/cilium/pre/upstream.yaml index c6bbe436a..a3062e605 100644 --- a/cilium/pre/upstream.yaml +++ b/cilium/pre/upstream.yaml @@ -51,6 +51,8 @@ data: cilium-endpoint-gc-interval: "5m0s" nodes-gc-interval: "5m0s" skip-cnp-status-startup-clean: "false" + # Disable the usage of CiliumEndpoint CRD + disable-endpoint-crd: "false" # To include or exclude matched resources from cilium identity evaluation labels: " k8s:app k8s:io\\.cilium\\.k8s\\.namespace\\.labels\\.team k8s:io\\.kubernetes\\.pod\\.namespace k8s:k8s-app io\\.cilium\\.k8s\\.policy cybozu\\.io/family app\\.cybozu\\.io neco\\.cybozu\\.io\\/registry identity\\.neco\\.cybozu\\.io " @@ -68,14 +70,14 @@ data: # NOTE that this will open the port on ALL nodes where Cilium pods are # scheduled. prometheus-serve-addr: ":9962" + # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this + # field is not set. + proxy-prometheus-port: "9964" # Metrics that should be enabled or disabled from the default metric # list. (+metric_foo to enable metric_foo , -metric_bar to disable # metric_bar). metrics: +cilium_bpf_map_pressure - # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this - # field is not set. - proxy-prometheus-port: "9964" # If you want metrics enabled in cilium-operator, set the port for # which the Cilium Operator will have their metrics exposed. 
# NOTE that this will open the port on the nodes where Cilium operator pod @@ -154,7 +156,7 @@ data: # - disabled # - vxlan (default) # - geneve - routing-mode: "native" + tunnel: "disabled" # Enables L7 proxy for L7 policy enforcement and visibility @@ -175,7 +177,6 @@ data: enable-local-node-route: "false" enable-ipv4-masquerade: "false" - enable-ipv4-big-tcp: "false" enable-ipv6-big-tcp: "false" enable-ipv6-masquerade: "true" @@ -211,7 +212,7 @@ data: pprof: "true" pprof-address: "0.0.0.0" pprof-port: "6060" - enable-k8s-networkpolicy: "true" + cni-uninstall: "true" # Disable health checking, when chaining mode is not set to portmap or none enable-endpoint-health-checking: "false" enable-health-checking: "true" @@ -231,12 +232,9 @@ data: hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt ipam: "cluster-pool" - ipam-cilium-node-update-rate: "15s" cluster-pool-ipv4-cidr: "10.0.0.0/8" cluster-pool-ipv4-mask-size: "24" disable-cnp-status-updates: "true" - cnp-node-status-gc-interval: "0s" - egress-gateway-reconciliation-trigger-interval: "1s" enable-vtep: "false" vtep-endpoint: "" vtep-cidr: "" @@ -249,37 +247,22 @@ data: cgroup-root: "/sys/fs/cgroup" enable-k8s-terminating-endpoint: "true" enable-sctp: "false" - k8s-client-qps: "5" - k8s-client-burst: "10" remove-cilium-node-taints: "true" - set-cilium-node-taints: "true" set-cilium-is-up-condition: "true" unmanaged-pod-watcher-interval: "15" - dnsproxy-socket-linger-timeout: "10" + # default DNS proxy to transparent mode + dnsproxy-enable-transparent-mode: "true" tofqdns-dns-reject-response-code: "refused" tofqdns-enable-dns-compression: "true" tofqdns-endpoint-max-ip-per-hostname: "50" tofqdns-idle-connection-grace-period: "0s" tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" tofqdns-proxy-response-max-delay: "100ms" bpf-ct-timeout-regular-any: 1h0m0s bpf-ct-timeout-service-any: 1h0m0s agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" - - mesh-auth-enabled: "true" - mesh-auth-queue-size: "1024" - mesh-auth-rotated-identities-queue-size: "1024" - mesh-auth-gc-interval: "5m0s" - - proxy-xff-num-trusted-hops-ingress: "0" - proxy-xff-num-trusted-hops-egress: "0" - proxy-connect-timeout: "2" - proxy-max-requests-per-connection: "0" - proxy-max-connection-duration-seconds: "0" - proxy-idle-timeout-seconds: "60" - - external-envoy-proxy: "false" --- # Source: cilium/templates/hubble-relay/configmap.yaml apiVersion: v1 @@ -292,17 +275,15 @@ data: cluster-name: default peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 - gops: true - gops-port: "9893" dial-timeout: retry-timeout: sort-buffer-len-max: sort-buffer-drain-timeout: - tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt - tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - tls-relay-server-cert-file: /var/lib/hubble-relay/tls/server.crt - tls-relay-server-key-file: /var/lib/hubble-relay/tls/server.key + tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt + tls-server-key-file: /var/lib/hubble-relay/tls/server.key --- # Source: cilium/templates/cilium-agent/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -367,9 +348,6 @@ rules: - ciliumnetworkpolicies - ciliumnodes - ciliumnodeconfigs - - 
ciliumcidrgroups - - ciliuml2announcementpolicies - - ciliumpodippools verbs: - list - watch @@ -410,7 +388,6 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints - - ciliuml2announcementpolicies/status verbs: - patch --- @@ -586,24 +563,14 @@ rules: - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io - ciliumnodeconfigs.cilium.io - - ciliumcidrgroups.cilium.io - - ciliuml2announcementpolicies.cilium.io - - ciliumpodippools.cilium.io - apiGroups: - cilium.io resources: - ciliumloadbalancerippools - - ciliumpodippools verbs: - get - list - watch -- apiGroups: - - cilium.io - resources: - - ciliumpodippools - verbs: - - create - apiGroups: - cilium.io resources: @@ -655,6 +622,7 @@ rules: - secrets resourceNames: - cilium-ca + - hubble-ca-secret verbs: - get - update @@ -820,16 +788,14 @@ spec: matchLabels: k8s-app: cilium updateStrategy: - rollingUpdate: - maxUnavailable: 2 - type: RollingUpdate + type: OnDelete template: metadata: annotations: prometheus.io/port: "9962" prometheus.io/scrape: "true" # ensure pods roll when configmap updates - cilium.io/cilium-configmap-checksum: "d4bf08bf4c6ee946280b8e7bcb2586f8833a3a3e46137f6979d77b3020e1f546" + cilium.io/cilium-configmap-checksum: "89029740a4242a661efaec2ce058760e9d1e323c603534509c54902ef6891b1e" # Set app AppArmor's profile to "unconfined". The value of this annotation # can be modified as long users know which profiles they have available # in AppArmor. @@ -842,7 +808,7 @@ spec: spec: containers: - name: cilium-agent - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - cilium-agent @@ -899,6 +865,18 @@ spec: fieldPath: metadata.namespace - name: CILIUM_CLUSTERMESH_CONFIG value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + name: cilium-config + key: cni-chaining-mode + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + name: cilium-config + key: custom-cni-conf + optional: true - name: KUBERNETES_SERVICE_HOST value: "127.0.0.1" - name: KUBERNETES_SERVICE_PORT @@ -910,26 +888,7 @@ spec: - "bash" - "-c" - | - set -o errexit - set -o pipefail - set -o nounset - - # When running in AWS ENI mode, it's likely that 'aws-node' has - # had a chance to install SNAT iptables rules. These can result - # in dropped traffic, so we should attempt to remove them. - # We do it using a 'postStart' hook since this may need to run - # for nodes which might have already been init'ed but may still - # have dangling rules. This is safe because there are no - # dependencies on anything that is part of the startup script - # itself, and can be safely run multiple times per node (e.g. in - # case of a restart). - if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]]; - then - echo 'Deleting iptables rules created by the AWS CNI VPC plugin' - iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore - fi - echo 'Done!' 
- + /cni-install.sh --enable-debug=false --cni-exclusive=true --log-file=/var/run/cilium/cilium-cni.log preStop: exec: command: @@ -1014,7 +973,7 @@ spec: mountPath: /tmp initContainers: - name: config - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - cilium @@ -1038,46 +997,11 @@ spec: - name: tmp mountPath: /tmp terminationMessagePolicy: FallbackToLogsOnError - - name: apply-sysctl-overwrites - image: "ghcr.io/cybozu/cilium:1.14.14.1" - imagePullPolicy: IfNotPresent - env: - - name: BIN_PATH - value: /opt/cni/bin - command: - - sh - - -ec - # The statically linked Go program binary is invoked to avoid any - # dependency on utilities like sh that can be missing on certain - # distros installed on the underlying host. Copy the binary to the - # same directory where we install cilium cni plugin so that exec permissions - # are available. - - | - cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; - nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; - rm /hostbin/cilium-sysctlfix - volumeMounts: - - name: hostproc - mountPath: /hostproc - - name: cni-path - mountPath: /hostbin - terminationMessagePolicy: FallbackToLogsOnError - securityContext: - seLinuxOptions: - level: s0 - type: spc_t - capabilities: - add: - - SYS_ADMIN - - SYS_CHROOT - - SYS_PTRACE - drop: - - ALL # Mount the bpf fs if it is not mounted. We will perform this task # from a privileged container because the mount propagation bidirectional # only works from privileged containers. - name: mount-bpf-fs - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' @@ -1093,7 +1017,7 @@ spec: mountPath: /sys/fs/bpf mountPropagation: Bidirectional - name: clean-cilium-state - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - /init-container.sh @@ -1138,7 +1062,7 @@ spec: mountPath: /var/run/cilium # wait-for-kube-proxy # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - "/install-plugin.sh" @@ -1189,11 +1113,6 @@ spec: hostPath: path: /sys/fs/bpf type: DirectoryOrCreate - # To mount cgroup2 filesystem on the host or apply sysctlfix - - name: hostproc - hostPath: - path: /proc - type: Directory # To keep state between restarts / upgrades for cgroup2 filesystem - name: cilium-cgroup hostPath: @@ -1220,27 +1139,11 @@ spec: type: FileOrCreate # To read the clustermesh configuration - name: clustermesh-secrets - projected: + secret: + secretName: cilium-clustermesh # note: the leading zero means this number is in octal representation: do not remove it defaultMode: 0400 - sources: - - secret: - name: cilium-clustermesh - optional: true - # note: items are not explicitly listed here, since the entries of this secret - # depend on the peers configured, and that would cause a restart of all agents - # at every addition/removal. Leaving the field empty makes each secret entry - # to be automatically projected into the volume as a file whose name is the key. 
- - secret: - name: clustermesh-apiserver-remote-cert - optional: true - items: - - key: tls.key - path: common-etcd-client.key - - key: tls.crt - path: common-etcd-client.crt - - key: ca.crt - path: common-etcd-client-ca.crt + optional: true - name: bgp-config-path configMap: name: bgp-config @@ -1261,12 +1164,12 @@ spec: name: hubble-server-certs optional: true items: + - key: ca.crt + path: client-ca.crt - key: tls.crt path: server.crt - key: tls.key path: server.key - - key: ca.crt - path: client-ca.crt --- # Source: cilium/templates/cilium-operator/deployment.yaml apiVersion: apps/v1 @@ -1287,20 +1190,16 @@ spec: matchLabels: io.cilium/app: operator name: cilium-operator - # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case - # of one replica and no user configured Recreate strategy. - # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the - # podAntiAffinity which prevents deployments of multiple operator replicas on the same node. strategy: rollingUpdate: - maxSurge: 25% - maxUnavailable: 50% + maxSurge: 1 + maxUnavailable: 1 type: RollingUpdate template: metadata: annotations: # ensure pods roll when configmap updates - cilium.io/cilium-configmap-checksum: "d4bf08bf4c6ee946280b8e7bcb2586f8833a3a3e46137f6979d77b3020e1f546" + cilium.io/cilium-configmap-checksum: "89029740a4242a661efaec2ce058760e9d1e323c603534509c54902ef6891b1e" prometheus.io/port: "9963" prometheus.io/scrape: "true" labels: @@ -1311,7 +1210,7 @@ spec: spec: containers: - name: cilium-operator - image: "ghcr.io/cybozu/cilium-operator-generic:1.14.14.1" + image: "ghcr.io/cybozu/cilium-operator-generic:1.13.16.1" imagePullPolicy: IfNotPresent command: - cilium-operator-generic @@ -1353,16 +1252,6 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 timeoutSeconds: 3 - readinessProbe: - httpGet: - host: "127.0.0.1" - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 5 - timeoutSeconds: 3 - failureThreshold: 5 volumeMounts: - name: cilium-config-path mountPath: /tmp/cilium/config-map @@ -1429,24 +1318,15 @@ spec: metadata: annotations: # ensure pods roll when configmap updates - cilium.io/hubble-relay-configmap-checksum: "021b54fa697399fbce31d464cf934ae4b921370cdcdcf3f98ca0a3d8a3201b76" + cilium.io/hubble-relay-configmap-checksum: "121d3ca340f3623a68297728e72f60908cf197df412eb4bb266f449c1794a5a7" labels: k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium spec: - securityContext: - fsGroup: 10000 containers: - name: hubble-relay - securityContext: - capabilities: - drop: - - ALL - runAsGroup: 10000 - runAsNonRoot: true - runAsUser: 10000 - image: "ghcr.io/cybozu/hubble-relay:1.14.14.1" + image: "ghcr.io/cybozu/hubble-relay:1.13.16.1" imagePullPolicy: IfNotPresent command: - hubble-relay @@ -1506,12 +1386,12 @@ spec: - secret: name: hubble-relay-client-certs items: + - key: ca.crt + path: hubble-server-ca.crt - key: tls.crt path: client.crt - key: tls.key path: client.key - - key: ca.crt - path: hubble-server-ca.crt - secret: name: hubble-relay-server-certs items: @@ -1520,6 +1400,49 @@ spec: - key: tls.key path: server.key --- +# Source: cilium/templates/hubble/tls-cronjob/job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hubble-generate-certs-4b23ed05ea + namespace: kube-system + labels: + k8s-app: hubble-generate-certs + app.kubernetes.io/name: hubble-generate-certs + app.kubernetes.io/part-of: cilium +spec: + template: + metadata: + 
labels: + k8s-app: hubble-generate-certs + spec: + containers: + - name: certgen + image: "ghcr.io/cybozu/cilium-certgen:0.1.11.1" + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. + args: + - "--cilium-namespace=kube-system" + - "--ca-generate" + - "--ca-reuse-secret" + - "--hubble-server-cert-generate" + - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io" + - "--hubble-server-cert-validity-duration=94608000s" + - "--hubble-relay-client-cert-generate" + - "--hubble-relay-client-cert-validity-duration=94608000s" + - "--hubble-relay-server-cert-generate" + - "--hubble-relay-server-cert-validity-duration=94608000s" + hostNetwork: true + serviceAccount: "hubble-generate-certs" + serviceAccountName: "hubble-generate-certs" + automountServiceAccountToken: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 +--- # Source: cilium/templates/hubble/tls-cronjob/cronjob.yaml apiVersion: batch/v1 kind: CronJob @@ -1530,7 +1453,6 @@ metadata: k8s-app: hubble-generate-certs app.kubernetes.io/name: hubble-generate-certs app.kubernetes.io/part-of: cilium - annotations: spec: schedule: "0 0 1 */4 *" concurrencyPolicy: Forbid @@ -1543,7 +1465,7 @@ spec: spec: containers: - name: certgen - image: "ghcr.io/cybozu/cilium-certgen:0.1.14.1" + image: "ghcr.io/cybozu/cilium-certgen:0.1.11.1" imagePullPolicy: IfNotPresent command: - "/usr/bin/cilium-certgen" @@ -1572,48 +1494,3 @@ spec: # Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled. # Only create the namespace if it's different from Ingress and Gateway API secret namespaces (if enabled). ---- -# Source: cilium/templates/hubble/tls-cronjob/job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: hubble-generate-certs - namespace: kube-system - labels: - k8s-app: hubble-generate-certs - app.kubernetes.io/name: hubble-generate-certs - app.kubernetes.io/part-of: cilium - annotations: - "helm.sh/hook": post-install,post-upgrade -spec: - template: - metadata: - labels: - k8s-app: hubble-generate-certs - spec: - containers: - - name: certgen - image: "ghcr.io/cybozu/cilium-certgen:0.1.14.1" - imagePullPolicy: IfNotPresent - command: - - "/usr/bin/cilium-certgen" - # Because this is executed as a job, we pass the values as command - # line args instead of via config map. This allows users to inspect - # the values used in past runs by inspecting the completed pod. 
- args: - - "--cilium-namespace=kube-system" - - "--ca-generate" - - "--ca-reuse-secret" - - "--hubble-server-cert-generate" - - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io" - - "--hubble-server-cert-validity-duration=94608000s" - - "--hubble-relay-client-cert-generate" - - "--hubble-relay-client-cert-validity-duration=94608000s" - - "--hubble-relay-server-cert-generate" - - "--hubble-relay-server-cert-validity-duration=94608000s" - hostNetwork: true - serviceAccount: "hubble-generate-certs" - serviceAccountName: "hubble-generate-certs" - automountServiceAccountToken: true - restartPolicy: OnFailure - ttlSecondsAfterFinished: 1800 diff --git a/cilium/pre/values.yaml b/cilium/pre/values.yaml index bc6580ce0..a03a95f9d 100644 --- a/cilium/pre/values.yaml +++ b/cilium/pre/values.yaml @@ -38,11 +38,6 @@ hubble: requests: cpu: 210m memory: 120Mi - podSecurityContext: - fsGroup: 10000 - securityContext: - runAsUser: 10000 - runAsGroup: 10000 tls: auto: method: "cronJob" @@ -103,4 +98,7 @@ socketLB: enabled: true hostNamespaceOnly: true tunnel: "disabled" +updateStrategy: + rollingUpdate: null + type: OnDelete upgradeCompatibility: "1.12" diff --git a/cilium/prod/kustomization.yaml b/cilium/prod/kustomization.yaml index a725da19a..8bb23dac1 100644 --- a/cilium/prod/kustomization.yaml +++ b/cilium/prod/kustomization.yaml @@ -33,15 +33,6 @@ patches: patch: |- - op: remove path: /spec/ttlSecondsAfterFinished - - target: - group: apps - version: v1 - kind: DaemonSet - name: cilium - patch: |- - - op: replace - path: /spec/updateStrategy/rollingUpdate/maxUnavailable - value: 1 configMapGenerator: - name: cilium-config namespace: kube-system diff --git a/cilium/prod/upstream.yaml b/cilium/prod/upstream.yaml index 97a13ff39..c2c3b003c 100644 --- a/cilium/prod/upstream.yaml +++ b/cilium/prod/upstream.yaml @@ -51,6 +51,8 @@ data: cilium-endpoint-gc-interval: "5m0s" nodes-gc-interval: "5m0s" skip-cnp-status-startup-clean: "false" + # Disable the usage of CiliumEndpoint CRD + disable-endpoint-crd: "false" # To include or exclude matched resources from cilium identity evaluation labels: " k8s:app k8s:io\\.cilium\\.k8s\\.namespace\\.labels\\.team k8s:io\\.kubernetes\\.pod\\.namespace k8s:k8s-app io\\.cilium\\.k8s\\.policy cybozu\\.io/family app\\.cybozu\\.io neco\\.cybozu\\.io\\/registry identity\\.neco\\.cybozu\\.io " @@ -68,14 +70,14 @@ data: # NOTE that this will open the port on ALL nodes where Cilium pods are # scheduled. prometheus-serve-addr: ":9962" + # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this + # field is not set. + proxy-prometheus-port: "9964" # Metrics that should be enabled or disabled from the default metric # list. (+metric_foo to enable metric_foo , -metric_bar to disable # metric_bar). metrics: +cilium_bpf_map_pressure - # Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this - # field is not set. - proxy-prometheus-port: "9964" # If you want metrics enabled in cilium-operator, set the port for # which the Cilium Operator will have their metrics exposed. 
# NOTE that this will open the port on the nodes where Cilium operator pod @@ -154,7 +156,7 @@ data: # - disabled # - vxlan (default) # - geneve - routing-mode: "native" + tunnel: "disabled" # Enables L7 proxy for L7 policy enforcement and visibility @@ -175,7 +177,6 @@ data: enable-local-node-route: "false" enable-ipv4-masquerade: "false" - enable-ipv4-big-tcp: "false" enable-ipv6-big-tcp: "false" enable-ipv6-masquerade: "true" @@ -208,7 +209,7 @@ data: enable-svc-source-range-check: "true" enable-l2-neigh-discovery: "true" arping-refresh-period: "30s" - enable-k8s-networkpolicy: "true" + cni-uninstall: "true" # Disable health checking, when chaining mode is not set to portmap or none enable-endpoint-health-checking: "false" enable-health-checking: "true" @@ -228,12 +229,9 @@ data: hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt ipam: "cluster-pool" - ipam-cilium-node-update-rate: "15s" cluster-pool-ipv4-cidr: "10.0.0.0/8" cluster-pool-ipv4-mask-size: "24" disable-cnp-status-updates: "true" - cnp-node-status-gc-interval: "0s" - egress-gateway-reconciliation-trigger-interval: "1s" enable-vtep: "false" vtep-endpoint: "" vtep-cidr: "" @@ -246,37 +244,22 @@ data: cgroup-root: "/sys/fs/cgroup" enable-k8s-terminating-endpoint: "true" enable-sctp: "false" - k8s-client-qps: "5" - k8s-client-burst: "10" remove-cilium-node-taints: "true" - set-cilium-node-taints: "true" set-cilium-is-up-condition: "true" unmanaged-pod-watcher-interval: "15" - dnsproxy-socket-linger-timeout: "10" + # default DNS proxy to transparent mode + dnsproxy-enable-transparent-mode: "true" tofqdns-dns-reject-response-code: "refused" tofqdns-enable-dns-compression: "true" tofqdns-endpoint-max-ip-per-hostname: "50" tofqdns-idle-connection-grace-period: "0s" tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" tofqdns-proxy-response-max-delay: "100ms" bpf-ct-timeout-regular-any: 1h0m0s bpf-ct-timeout-service-any: 1h0m0s agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" - - mesh-auth-enabled: "true" - mesh-auth-queue-size: "1024" - mesh-auth-rotated-identities-queue-size: "1024" - mesh-auth-gc-interval: "5m0s" - - proxy-xff-num-trusted-hops-ingress: "0" - proxy-xff-num-trusted-hops-egress: "0" - proxy-connect-timeout: "2" - proxy-max-requests-per-connection: "0" - proxy-max-connection-duration-seconds: "0" - proxy-idle-timeout-seconds: "60" - - external-envoy-proxy: "false" --- # Source: cilium/templates/hubble-relay/configmap.yaml apiVersion: v1 @@ -289,17 +272,15 @@ data: cluster-name: default peer-service: "hubble-peer.kube-system.svc.cluster.local:443" listen-address: :4245 - gops: true - gops-port: "9893" dial-timeout: retry-timeout: sort-buffer-len-max: sort-buffer-drain-timeout: - tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt - tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key + tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt + tls-client-key-file: /var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - tls-relay-server-cert-file: /var/lib/hubble-relay/tls/server.crt - tls-relay-server-key-file: /var/lib/hubble-relay/tls/server.key + tls-server-cert-file: /var/lib/hubble-relay/tls/server.crt + tls-server-key-file: /var/lib/hubble-relay/tls/server.key --- # Source: cilium/templates/cilium-agent/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -364,9 +345,6 @@ rules: - ciliumnetworkpolicies - 
ciliumnodes - ciliumnodeconfigs - - ciliumcidrgroups - - ciliuml2announcementpolicies - - ciliumpodippools verbs: - list - watch @@ -407,7 +385,6 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints - - ciliuml2announcementpolicies/status verbs: - patch --- @@ -583,24 +560,14 @@ rules: - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io - ciliumnodeconfigs.cilium.io - - ciliumcidrgroups.cilium.io - - ciliuml2announcementpolicies.cilium.io - - ciliumpodippools.cilium.io - apiGroups: - cilium.io resources: - ciliumloadbalancerippools - - ciliumpodippools verbs: - get - list - watch -- apiGroups: - - cilium.io - resources: - - ciliumpodippools - verbs: - - create - apiGroups: - cilium.io resources: @@ -652,6 +619,7 @@ rules: - secrets resourceNames: - cilium-ca + - hubble-ca-secret verbs: - get - update @@ -817,16 +785,14 @@ spec: matchLabels: k8s-app: cilium updateStrategy: - rollingUpdate: - maxUnavailable: 2 - type: RollingUpdate + type: OnDelete template: metadata: annotations: prometheus.io/port: "9962" prometheus.io/scrape: "true" # ensure pods roll when configmap updates - cilium.io/cilium-configmap-checksum: "d5a6358f3358cdc61bf73eddd0be4f8a5b8909d0f95d0236cd095e308678a1a0" + cilium.io/cilium-configmap-checksum: "6ce5254ae5e45c178f019621aa0bca076d336d1231fd90ddb8df2f77e2ebc667" # Set app AppArmor's profile to "unconfined". The value of this annotation # can be modified as long users know which profiles they have available # in AppArmor. @@ -839,7 +805,7 @@ spec: spec: containers: - name: cilium-agent - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - cilium-agent @@ -896,6 +862,18 @@ spec: fieldPath: metadata.namespace - name: CILIUM_CLUSTERMESH_CONFIG value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + name: cilium-config + key: cni-chaining-mode + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + name: cilium-config + key: custom-cni-conf + optional: true - name: KUBERNETES_SERVICE_HOST value: "127.0.0.1" - name: KUBERNETES_SERVICE_PORT @@ -907,26 +885,7 @@ spec: - "bash" - "-c" - | - set -o errexit - set -o pipefail - set -o nounset - - # When running in AWS ENI mode, it's likely that 'aws-node' has - # had a chance to install SNAT iptables rules. These can result - # in dropped traffic, so we should attempt to remove them. - # We do it using a 'postStart' hook since this may need to run - # for nodes which might have already been init'ed but may still - # have dangling rules. This is safe because there are no - # dependencies on anything that is part of the startup script - # itself, and can be safely run multiple times per node (e.g. in - # case of a restart). - if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]]; - then - echo 'Deleting iptables rules created by the AWS CNI VPC plugin' - iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore - fi - echo 'Done!' 
- + /cni-install.sh --enable-debug=false --cni-exclusive=true --log-file=/var/run/cilium/cilium-cni.log preStop: exec: command: @@ -1011,7 +970,7 @@ spec: mountPath: /tmp initContainers: - name: config - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - cilium @@ -1035,46 +994,11 @@ spec: - name: tmp mountPath: /tmp terminationMessagePolicy: FallbackToLogsOnError - - name: apply-sysctl-overwrites - image: "ghcr.io/cybozu/cilium:1.14.14.1" - imagePullPolicy: IfNotPresent - env: - - name: BIN_PATH - value: /opt/cni/bin - command: - - sh - - -ec - # The statically linked Go program binary is invoked to avoid any - # dependency on utilities like sh that can be missing on certain - # distros installed on the underlying host. Copy the binary to the - # same directory where we install cilium cni plugin so that exec permissions - # are available. - - | - cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; - nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; - rm /hostbin/cilium-sysctlfix - volumeMounts: - - name: hostproc - mountPath: /hostproc - - name: cni-path - mountPath: /hostbin - terminationMessagePolicy: FallbackToLogsOnError - securityContext: - seLinuxOptions: - level: s0 - type: spc_t - capabilities: - add: - - SYS_ADMIN - - SYS_CHROOT - - SYS_PTRACE - drop: - - ALL # Mount the bpf fs if it is not mounted. We will perform this task # from a privileged container because the mount propagation bidirectional # only works from privileged containers. - name: mount-bpf-fs - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent args: - 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf' @@ -1090,7 +1014,7 @@ spec: mountPath: /sys/fs/bpf mountPropagation: Bidirectional - name: clean-cilium-state - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - /init-container.sh @@ -1135,7 +1059,7 @@ spec: mountPath: /var/run/cilium # wait-for-kube-proxy # Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent - name: install-cni-binaries - image: "ghcr.io/cybozu/cilium:1.14.14.1" + image: "ghcr.io/cybozu/cilium:1.13.16.4" imagePullPolicy: IfNotPresent command: - "/install-plugin.sh" @@ -1186,11 +1110,6 @@ spec: hostPath: path: /sys/fs/bpf type: DirectoryOrCreate - # To mount cgroup2 filesystem on the host or apply sysctlfix - - name: hostproc - hostPath: - path: /proc - type: Directory # To keep state between restarts / upgrades for cgroup2 filesystem - name: cilium-cgroup hostPath: @@ -1217,27 +1136,11 @@ spec: type: FileOrCreate # To read the clustermesh configuration - name: clustermesh-secrets - projected: + secret: + secretName: cilium-clustermesh # note: the leading zero means this number is in octal representation: do not remove it defaultMode: 0400 - sources: - - secret: - name: cilium-clustermesh - optional: true - # note: items are not explicitly listed here, since the entries of this secret - # depend on the peers configured, and that would cause a restart of all agents - # at every addition/removal. Leaving the field empty makes each secret entry - # to be automatically projected into the volume as a file whose name is the key. 
- - secret: - name: clustermesh-apiserver-remote-cert - optional: true - items: - - key: tls.key - path: common-etcd-client.key - - key: tls.crt - path: common-etcd-client.crt - - key: ca.crt - path: common-etcd-client-ca.crt + optional: true - name: bgp-config-path configMap: name: bgp-config @@ -1258,12 +1161,12 @@ spec: name: hubble-server-certs optional: true items: + - key: ca.crt + path: client-ca.crt - key: tls.crt path: server.crt - key: tls.key path: server.key - - key: ca.crt - path: client-ca.crt --- # Source: cilium/templates/cilium-operator/deployment.yaml apiVersion: apps/v1 @@ -1284,20 +1187,16 @@ spec: matchLabels: io.cilium/app: operator name: cilium-operator - # ensure operator update on single node k8s clusters, by using rolling update with maxUnavailable=100% in case - # of one replica and no user configured Recreate strategy. - # otherwise an update might get stuck due to the default maxUnavailable=50% in combination with the - # podAntiAffinity which prevents deployments of multiple operator replicas on the same node. strategy: rollingUpdate: - maxSurge: 25% - maxUnavailable: 50% + maxSurge: 1 + maxUnavailable: 1 type: RollingUpdate template: metadata: annotations: # ensure pods roll when configmap updates - cilium.io/cilium-configmap-checksum: "d5a6358f3358cdc61bf73eddd0be4f8a5b8909d0f95d0236cd095e308678a1a0" + cilium.io/cilium-configmap-checksum: "6ce5254ae5e45c178f019621aa0bca076d336d1231fd90ddb8df2f77e2ebc667" prometheus.io/port: "9963" prometheus.io/scrape: "true" labels: @@ -1308,7 +1207,7 @@ spec: spec: containers: - name: cilium-operator - image: "ghcr.io/cybozu/cilium-operator-generic:1.14.14.1" + image: "ghcr.io/cybozu/cilium-operator-generic:1.13.16.1" imagePullPolicy: IfNotPresent command: - cilium-operator-generic @@ -1350,16 +1249,6 @@ spec: initialDelaySeconds: 60 periodSeconds: 10 timeoutSeconds: 3 - readinessProbe: - httpGet: - host: "127.0.0.1" - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 5 - timeoutSeconds: 3 - failureThreshold: 5 volumeMounts: - name: cilium-config-path mountPath: /tmp/cilium/config-map @@ -1423,24 +1312,15 @@ spec: metadata: annotations: # ensure pods roll when configmap updates - cilium.io/hubble-relay-configmap-checksum: "021b54fa697399fbce31d464cf934ae4b921370cdcdcf3f98ca0a3d8a3201b76" + cilium.io/hubble-relay-configmap-checksum: "121d3ca340f3623a68297728e72f60908cf197df412eb4bb266f449c1794a5a7" labels: k8s-app: hubble-relay app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium spec: - securityContext: - fsGroup: 10000 containers: - name: hubble-relay - securityContext: - capabilities: - drop: - - ALL - runAsGroup: 10000 - runAsNonRoot: true - runAsUser: 10000 - image: "ghcr.io/cybozu/hubble-relay:1.14.14.1" + image: "ghcr.io/cybozu/hubble-relay:1.13.16.1" imagePullPolicy: IfNotPresent command: - hubble-relay @@ -1497,12 +1377,12 @@ spec: - secret: name: hubble-relay-client-certs items: + - key: ca.crt + path: hubble-server-ca.crt - key: tls.crt path: client.crt - key: tls.key path: client.key - - key: ca.crt - path: hubble-server-ca.crt - secret: name: hubble-relay-server-certs items: @@ -1511,6 +1391,49 @@ spec: - key: tls.key path: server.key --- +# Source: cilium/templates/hubble/tls-cronjob/job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hubble-generate-certs-4b23ed05ea + namespace: kube-system + labels: + k8s-app: hubble-generate-certs + app.kubernetes.io/name: hubble-generate-certs + app.kubernetes.io/part-of: cilium +spec: + template: + metadata: + 
labels: + k8s-app: hubble-generate-certs + spec: + containers: + - name: certgen + image: "ghcr.io/cybozu/cilium-certgen:0.1.11.1" + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/cilium-certgen" + # Because this is executed as a job, we pass the values as command + # line args instead of via config map. This allows users to inspect + # the values used in past runs by inspecting the completed pod. + args: + - "--cilium-namespace=kube-system" + - "--ca-generate" + - "--ca-reuse-secret" + - "--hubble-server-cert-generate" + - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io" + - "--hubble-server-cert-validity-duration=94608000s" + - "--hubble-relay-client-cert-generate" + - "--hubble-relay-client-cert-validity-duration=94608000s" + - "--hubble-relay-server-cert-generate" + - "--hubble-relay-server-cert-validity-duration=94608000s" + hostNetwork: true + serviceAccount: "hubble-generate-certs" + serviceAccountName: "hubble-generate-certs" + automountServiceAccountToken: true + restartPolicy: OnFailure + ttlSecondsAfterFinished: 1800 +--- # Source: cilium/templates/hubble/tls-cronjob/cronjob.yaml apiVersion: batch/v1 kind: CronJob @@ -1521,7 +1444,6 @@ metadata: k8s-app: hubble-generate-certs app.kubernetes.io/name: hubble-generate-certs app.kubernetes.io/part-of: cilium - annotations: spec: schedule: "0 0 1 */4 *" concurrencyPolicy: Forbid @@ -1534,7 +1456,7 @@ spec: spec: containers: - name: certgen - image: "ghcr.io/cybozu/cilium-certgen:0.1.14.1" + image: "ghcr.io/cybozu/cilium-certgen:0.1.11.1" imagePullPolicy: IfNotPresent command: - "/usr/bin/cilium-certgen" @@ -1563,48 +1485,3 @@ spec: # Only create the namespace if it's different from Ingress secret namespace or Ingress is not enabled. # Only create the namespace if it's different from Ingress and Gateway API secret namespaces (if enabled). ---- -# Source: cilium/templates/hubble/tls-cronjob/job.yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: hubble-generate-certs - namespace: kube-system - labels: - k8s-app: hubble-generate-certs - app.kubernetes.io/name: hubble-generate-certs - app.kubernetes.io/part-of: cilium - annotations: - "helm.sh/hook": post-install,post-upgrade -spec: - template: - metadata: - labels: - k8s-app: hubble-generate-certs - spec: - containers: - - name: certgen - image: "ghcr.io/cybozu/cilium-certgen:0.1.14.1" - imagePullPolicy: IfNotPresent - command: - - "/usr/bin/cilium-certgen" - # Because this is executed as a job, we pass the values as command - # line args instead of via config map. This allows users to inspect - # the values used in past runs by inspecting the completed pod. 
- args: - - "--cilium-namespace=kube-system" - - "--ca-generate" - - "--ca-reuse-secret" - - "--hubble-server-cert-generate" - - "--hubble-server-cert-common-name=*.default.hubble-grpc.cilium.io" - - "--hubble-server-cert-validity-duration=94608000s" - - "--hubble-relay-client-cert-generate" - - "--hubble-relay-client-cert-validity-duration=94608000s" - - "--hubble-relay-server-cert-generate" - - "--hubble-relay-server-cert-validity-duration=94608000s" - hostNetwork: true - serviceAccount: "hubble-generate-certs" - serviceAccountName: "hubble-generate-certs" - automountServiceAccountToken: true - restartPolicy: OnFailure - ttlSecondsAfterFinished: 1800 diff --git a/cilium/prod/values.yaml b/cilium/prod/values.yaml index d56f7ed66..ad673a00c 100644 --- a/cilium/prod/values.yaml +++ b/cilium/prod/values.yaml @@ -35,11 +35,6 @@ hubble: requests: cpu: 100m memory: 200Mi - podSecurityContext: - fsGroup: 10000 - securityContext: - runAsUser: 10000 - runAsGroup: 10000 tls: auto: method: "cronJob" @@ -93,4 +88,7 @@ socketLB: enabled: true hostNamespaceOnly: true tunnel: "disabled" +updateStrategy: + rollingUpdate: null + type: OnDelete upgradeCompatibility: "1.12" diff --git a/etc/cilium-pre.yaml b/etc/cilium-pre.yaml index 4e132f1e5..ae02b6925 100644 --- a/etc/cilium-pre.yaml +++ b/etc/cilium-pre.yaml @@ -91,9 +91,6 @@ rules: - ciliumexternalworkloads - ciliumexternalworkloads/finalizers - ciliumexternalworkloads/status - - ciliumcidrgroups - - ciliumcidrgroups/finalizers - - ciliumcidrgroups/status verbs: - get - list @@ -158,9 +155,6 @@ rules: - ciliumnetworkpolicies - ciliumnodes - ciliumnodeconfigs - - ciliumcidrgroups - - ciliuml2announcementpolicies - - ciliumpodippools verbs: - list - watch @@ -200,7 +194,6 @@ rules: - ciliumclusterwidenetworkpolicies/status - ciliumendpoints/status - ciliumendpoints - - ciliuml2announcementpolicies/status verbs: - patch --- @@ -358,9 +351,6 @@ rules: - ciliumnetworkpolicies.cilium.io - ciliumnodes.cilium.io - ciliumnodeconfigs.cilium.io - - ciliumcidrgroups.cilium.io - - ciliuml2announcementpolicies.cilium.io - - ciliumpodippools.cilium.io resources: - customresourcedefinitions verbs: @@ -369,17 +359,10 @@ rules: - cilium.io resources: - ciliumloadbalancerippools - - ciliumpodippools verbs: - get - list - watch -- apiGroups: - - cilium.io - resources: - - ciliumpodippools - verbs: - - create - apiGroups: - cilium.io resources: @@ -422,6 +405,7 @@ rules: - "" resourceNames: - cilium-ca + - hubble-ca-secret resources: - secrets verbs: @@ -554,15 +538,15 @@ data: cluster-pool-ipv4-cidr: 10.0.0.0/8 cluster-pool-ipv4-mask-size: "24" cni-chaining-mode: generic-veth - cnp-node-status-gc-interval: 0s + cni-uninstall: "true" custom-cni-conf: "true" debug: "false" debug-verbose: "" devices: eth+ eno1+ eno2+ direct-routing-device: e+ disable-cnp-status-updates: "true" - dnsproxy-socket-linger-timeout: "10" - egress-gateway-reconciliation-trigger-interval: 1s + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "true" enable-auto-protect-node-port-range: "true" enable-bgp-control-plane: "false" enable-bpf-clock-probe: "false" @@ -575,12 +559,10 @@ data: enable-hubble: "true" enable-identity-mark: "false" enable-ipv4: "true" - enable-ipv4-big-tcp: "false" enable-ipv4-masquerade: "false" enable-ipv6: "false" enable-ipv6-big-tcp: "false" enable-ipv6-masquerade: "true" - enable-k8s-networkpolicy: "true" enable-k8s-terminating-endpoint: "true" enable-l2-neigh-discovery: "true" enable-l7-proxy: "true" @@ -596,7 +578,6 @@ data: enable-vtep: "false" 
enable-well-known-identities: "false" enable-xt-socket-fallback: "true" - external-envoy-proxy: "false" hubble-disable-tls: "false" hubble-listen-address: :4244 hubble-socket-path: /var/run/cilium/hubble.sock @@ -608,18 +589,11 @@ data: identity-heartbeat-timeout: 30m0s install-no-conntrack-iptables-rules: "false" ipam: cluster-pool - ipam-cilium-node-update-rate: 15s - k8s-client-burst: "10" - k8s-client-qps: "5" kube-proxy-replacement: partial kube-proxy-replacement-healthz-bind-address: "" labels: ' k8s:app k8s:io\.cilium\.k8s\.namespace\.labels\.team k8s:io\.kubernetes\.pod\.namespace k8s:k8s-app io\.cilium\.k8s\.policy cybozu\.io/family app\.cybozu\.io neco\.cybozu\.io\/registry identity\.neco\.cybozu\.io ' - mesh-auth-enabled: "true" - mesh-auth-gc-interval: 5m0s - mesh-auth-queue-size: "1024" - mesh-auth-rotated-identities-queue-size: "1024" metrics: +cilium_bpf_map_pressure monitor-aggregation: medium monitor-aggregation-flags: all @@ -635,17 +609,9 @@ data: preallocate-bpf-maps: "false" procfs: /host/proc prometheus-serve-addr: :9962 - proxy-connect-timeout: "2" - proxy-idle-timeout-seconds: "60" - proxy-max-connection-duration-seconds: "0" - proxy-max-requests-per-connection: "0" proxy-prometheus-port: "9964" - proxy-xff-num-trusted-hops-egress: "0" - proxy-xff-num-trusted-hops-ingress: "0" remove-cilium-node-taints: "true" - routing-mode: native set-cilium-is-up-condition: "true" - set-cilium-node-taints: "true" sidecar-istio-proxy-image: cilium/istio_proxy skip-cnp-status-startup-clean: "false" synchronize-k8s-nodes: "true" @@ -654,7 +620,9 @@ data: tofqdns-endpoint-max-ip-per-hostname: "50" tofqdns-idle-connection-grace-period: 0s tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" tofqdns-proxy-response-max-delay: 100ms + tunnel: disabled unmanaged-pod-watcher-interval: "15" vtep-cidr: "" vtep-endpoint: "" @@ -668,10 +636,10 @@ metadata: apiVersion: v1 data: config.yaml: "cluster-name: default\npeer-service: \"hubble-peer.kube-system.svc.cluster.local:443\"\nlisten-address: - :4245\ngops: true\ngops-port: \"9893\"\ndial-timeout: \nretry-timeout: \nsort-buffer-len-max: - \nsort-buffer-drain-timeout: \ntls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt\ntls-hubble-client-key-file: - /var/lib/hubble-relay/tls/client.key\ntls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\ntls-relay-server-cert-file: - /var/lib/hubble-relay/tls/server.crt\ntls-relay-server-key-file: /var/lib/hubble-relay/tls/server.key\n" + :4245\ndial-timeout: \nretry-timeout: \nsort-buffer-len-max: \nsort-buffer-drain-timeout: + \ntls-client-cert-file: /var/lib/hubble-relay/tls/client.crt\ntls-client-key-file: + /var/lib/hubble-relay/tls/client.key\ntls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\ntls-server-cert-file: + /var/lib/hubble-relay/tls/server.crt\ntls-server-key-file: /var/lib/hubble-relay/tls/server.key\n" kind: ConfigMap metadata: name: hubble-relay-config @@ -757,13 +725,13 @@ spec: name: cilium-operator strategy: rollingUpdate: - maxSurge: 25% - maxUnavailable: 50% + maxSurge: 1 + maxUnavailable: 1 type: RollingUpdate template: metadata: annotations: - cilium.io/cilium-configmap-checksum: d4bf08bf4c6ee946280b8e7bcb2586f8833a3a3e46137f6979d77b3020e1f546 + cilium.io/cilium-configmap-checksum: 89029740a4242a661efaec2ce058760e9d1e323c603534509c54902ef6891b1e prometheus.io/port: "9963" prometheus.io/scrape: "true" labels: @@ -807,7 +775,7 @@ spec: value: 127.0.0.1 - name: KUBERNETES_SERVICE_PORT value: "16443" - 
image: ghcr.io/cybozu/cilium-operator-generic:1.14.14.1 + image: ghcr.io/cybozu/cilium-operator-generic:1.13.16.1 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -824,16 +792,6 @@ spec: hostPort: 9963 name: prometheus protocol: TCP - readinessProbe: - failureThreshold: 5 - httpGet: - host: 127.0.0.1 - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 0 - periodSeconds: 5 - timeoutSeconds: 3 resources: limits: cpu: 250m @@ -887,7 +845,7 @@ spec: template: metadata: annotations: - cilium.io/hubble-relay-configmap-checksum: 021b54fa697399fbce31d464cf934ae4b921370cdcdcf3f98ca0a3d8a3201b76 + cilium.io/hubble-relay-configmap-checksum: 121d3ca340f3623a68297728e72f60908cf197df412eb4bb266f449c1794a5a7 labels: app.kubernetes.io/name: hubble-relay app.kubernetes.io/part-of: cilium @@ -906,7 +864,7 @@ spec: - serve command: - hubble-relay - image: ghcr.io/cybozu/hubble-relay:1.14.14.1 + image: ghcr.io/cybozu/hubble-relay:1.13.16.1 imagePullPolicy: IfNotPresent livenessProbe: tcpSocket: @@ -925,13 +883,6 @@ spec: requests: cpu: 210m memory: 120Mi - securityContext: - capabilities: - drop: - - ALL - runAsGroup: 10000 - runAsNonRoot: true - runAsUser: 10000 terminationMessagePolicy: FallbackToLogsOnError volumeMounts: - mountPath: /etc/hubble-relay @@ -944,8 +895,6 @@ spec: kubernetes.io/os: linux priorityClassName: null restartPolicy: Always - securityContext: - fsGroup: 10000 serviceAccount: hubble-relay serviceAccountName: hubble-relay terminationGracePeriodSeconds: 1 @@ -962,12 +911,12 @@ spec: sources: - secret: items: + - key: ca.crt + path: hubble-server-ca.crt - key: tls.crt path: client.crt - key: tls.key path: client.key - - key: ca.crt - path: hubble-server-ca.crt name: hubble-relay-client-certs - secret: items: @@ -1010,7 +959,7 @@ spec: - --hubble-relay-server-cert-validity-duration=94608000s command: - /usr/bin/cilium-certgen - image: ghcr.io/cybozu/cilium-certgen:0.1.14.1 + image: ghcr.io/cybozu/cilium-certgen:0.1.11.1 imagePullPolicy: IfNotPresent name: certgen hostNetwork: true @@ -1053,7 +1002,7 @@ spec: template: metadata: annotations: - cilium.io/cilium-configmap-checksum: d4bf08bf4c6ee946280b8e7bcb2586f8833a3a3e46137f6979d77b3020e1f546 + cilium.io/cilium-configmap-checksum: 89029740a4242a661efaec2ce058760e9d1e323c603534509c54902ef6891b1e container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined prometheus.io/port: "9962" @@ -1089,11 +1038,23 @@ spec: fieldPath: metadata.namespace - name: CILIUM_CLUSTERMESH_CONFIG value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: custom-cni-conf + name: cilium-config + optional: true - name: KUBERNETES_SERVICE_HOST value: 127.0.0.1 - name: KUBERNETES_SERVICE_PORT value: "16443" - image: ghcr.io/cybozu/cilium:1.14.14.1 + image: ghcr.io/cybozu/cilium:1.13.16.4 imagePullPolicy: IfNotPresent lifecycle: postStart: @@ -1102,25 +1063,7 @@ spec: - bash - -c - | - set -o errexit - set -o pipefail - set -o nounset - - # When running in AWS ENI mode, it's likely that 'aws-node' has - # had a chance to install SNAT iptables rules. These can result - # in dropped traffic, so we should attempt to remove them. - # We do it using a 'postStart' hook since this may need to run - # for nodes which might have already been init'ed but may still - # have dangling rules. 
This is safe because there are no
-                # dependencies on anything that is part of the startup script
-                # itself, and can be safely run multiple times per node (e.g. in
-                # case of a restart).
-                if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
-                then
-                    echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
-                    iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
-                fi
-                echo 'Done!'
+                /cni-install.sh --enable-debug=false --cni-exclusive=true --log-file=/var/run/cilium/cilium-cni.log
           preStop:
             exec:
               command:
@@ -1252,50 +1195,20 @@ spec:
           value: 127.0.0.1
         - name: KUBERNETES_SERVICE_PORT
           value: "16443"
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: config
         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
         - mountPath: /tmp
           name: tmp
-      - command:
-        - sh
-        - -ec
-        - |
-          cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
-          nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
-          rm /hostbin/cilium-sysctlfix
-        env:
-        - name: BIN_PATH
-          value: /opt/cni/bin
-        image: ghcr.io/cybozu/cilium:1.14.14.1
-        imagePullPolicy: IfNotPresent
-        name: apply-sysctl-overwrites
-        securityContext:
-          capabilities:
-            add:
-            - SYS_ADMIN
-            - SYS_CHROOT
-            - SYS_PTRACE
-            drop:
-            - ALL
-          seLinuxOptions:
-            level: s0
-            type: spc_t
-        terminationMessagePolicy: FallbackToLogsOnError
-        volumeMounts:
-        - mountPath: /hostproc
-          name: hostproc
-        - mountPath: /hostbin
-          name: cni-path
       - args:
         - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
         command:
         - /bin/bash
         - -c
         - --
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: mount-bpf-fs
         securityContext:
@@ -1324,7 +1237,7 @@ spec:
           value: 127.0.0.1
         - name: KUBERNETES_SERVICE_PORT
           value: "16443"
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: clean-cilium-state
         securityContext:
@@ -1350,7 +1263,7 @@ spec:
           name: cilium-run
       - command:
         - /install-plugin.sh
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: install-cni-binaries
         resources:
@@ -1388,10 +1301,6 @@ spec:
           path: /sys/fs/bpf
           type: DirectoryOrCreate
         name: bpf-maps
-      - hostPath:
-          path: /proc
-          type: Directory
-        name: hostproc
       - hostPath:
           path: /sys/fs/cgroup
           type: DirectoryOrCreate
@@ -1412,22 +1321,10 @@ spec:
           type: FileOrCreate
         name: xtables-lock
       - name: clustermesh-secrets
-        projected:
+        secret:
           defaultMode: 256
-          sources:
-          - secret:
-              name: cilium-clustermesh
-              optional: true
-          - secret:
-              items:
-              - key: tls.key
-                path: common-etcd-client.key
-              - key: tls.crt
-                path: common-etcd-client.crt
-              - key: ca.crt
-                path: common-etcd-client-ca.crt
-              name: clustermesh-apiserver-remote-cert
-              optional: true
+          optional: true
+          secretName: cilium-clustermesh
       - configMap:
           name: bgp-config
         name: bgp-config-path
@@ -1445,29 +1342,25 @@ spec:
           sources:
           - secret:
               items:
+              - key: ca.crt
+                path: client-ca.crt
               - key: tls.crt
                 path: server.crt
               - key: tls.key
                 path: server.key
-              - key: ca.crt
-                path: client-ca.crt
               name: hubble-server-certs
               optional: true
   updateStrategy:
-    rollingUpdate:
-      maxUnavailable: 1
-    type: RollingUpdate
+    type: OnDelete
 ---
 apiVersion: batch/v1
 kind: Job
 metadata:
-  annotations:
-    helm.sh/hook: post-install,post-upgrade
   labels:
     app.kubernetes.io/name: hubble-generate-certs
     app.kubernetes.io/part-of: cilium
     k8s-app: hubble-generate-certs
-  name: hubble-generate-certs
+  name: hubble-generate-certs-4b23ed05ea
   namespace: kube-system
 spec:
   template:
     metadata:
@@ -1490,7 +1383,7 @@ spec:
         - --hubble-relay-server-cert-validity-duration=94608000s
         command:
         - /usr/bin/cilium-certgen
-        image: ghcr.io/cybozu/cilium-certgen:0.1.14.1
+        image: ghcr.io/cybozu/cilium-certgen:0.1.11.1
         imagePullPolicy: IfNotPresent
         name: certgen
       hostNetwork: true
diff --git a/etc/cilium.yaml b/etc/cilium.yaml
index 90e0b2758..21e735e39 100644
--- a/etc/cilium.yaml
+++ b/etc/cilium.yaml
@@ -91,9 +91,6 @@ rules:
   - ciliumexternalworkloads
   - ciliumexternalworkloads/finalizers
   - ciliumexternalworkloads/status
-  - ciliumcidrgroups
-  - ciliumcidrgroups/finalizers
-  - ciliumcidrgroups/status
   verbs:
   - get
   - list
@@ -158,9 +155,6 @@ rules:
   - ciliumnetworkpolicies
   - ciliumnodes
   - ciliumnodeconfigs
-  - ciliumcidrgroups
-  - ciliuml2announcementpolicies
-  - ciliumpodippools
   verbs:
   - list
   - watch
@@ -200,7 +194,6 @@ rules:
   - ciliumclusterwidenetworkpolicies/status
   - ciliumendpoints/status
   - ciliumendpoints
-  - ciliuml2announcementpolicies/status
   verbs:
   - patch
 ---
@@ -358,9 +351,6 @@ rules:
   - ciliumnetworkpolicies.cilium.io
   - ciliumnodes.cilium.io
   - ciliumnodeconfigs.cilium.io
-  - ciliumcidrgroups.cilium.io
-  - ciliuml2announcementpolicies.cilium.io
-  - ciliumpodippools.cilium.io
   resources:
   - customresourcedefinitions
   verbs:
@@ -369,17 +359,10 @@ rules:
   - cilium.io
   resources:
   - ciliumloadbalancerippools
-  - ciliumpodippools
   verbs:
   - get
   - list
   - watch
-- apiGroups:
-  - cilium.io
-  resources:
-  - ciliumpodippools
-  verbs:
-  - create
 - apiGroups:
   - cilium.io
   resources:
@@ -422,6 +405,7 @@ rules:
   - ""
   resourceNames:
   - cilium-ca
+  - hubble-ca-secret
   resources:
   - secrets
   verbs:
@@ -554,15 +538,15 @@ data:
   cluster-pool-ipv4-cidr: 10.0.0.0/8
   cluster-pool-ipv4-mask-size: "24"
   cni-chaining-mode: generic-veth
-  cnp-node-status-gc-interval: 0s
+  cni-uninstall: "true"
   custom-cni-conf: "true"
   debug: "false"
   debug-verbose: ""
   devices: eth+ eno1+ eno2+
   direct-routing-device: e+
   disable-cnp-status-updates: "true"
-  dnsproxy-socket-linger-timeout: "10"
-  egress-gateway-reconciliation-trigger-interval: 1s
+  disable-endpoint-crd: "false"
+  dnsproxy-enable-transparent-mode: "true"
   enable-auto-protect-node-port-range: "true"
   enable-bgp-control-plane: "false"
   enable-bpf-clock-probe: "false"
@@ -575,12 +559,10 @@ data:
   enable-hubble: "true"
   enable-identity-mark: "false"
   enable-ipv4: "true"
-  enable-ipv4-big-tcp: "false"
   enable-ipv4-masquerade: "false"
   enable-ipv6: "false"
   enable-ipv6-big-tcp: "false"
   enable-ipv6-masquerade: "true"
-  enable-k8s-networkpolicy: "true"
   enable-k8s-terminating-endpoint: "true"
   enable-l2-neigh-discovery: "true"
   enable-l7-proxy: "true"
@@ -596,7 +578,6 @@ data:
   enable-vtep: "false"
   enable-well-known-identities: "false"
   enable-xt-socket-fallback: "true"
-  external-envoy-proxy: "false"
   hubble-disable-tls: "false"
   hubble-listen-address: :4244
   hubble-socket-path: /var/run/cilium/hubble.sock
@@ -608,18 +589,11 @@ data:
   identity-heartbeat-timeout: 30m0s
   install-no-conntrack-iptables-rules: "false"
   ipam: cluster-pool
-  ipam-cilium-node-update-rate: 15s
-  k8s-client-burst: "10"
-  k8s-client-qps: "5"
   kube-proxy-replacement: partial
   kube-proxy-replacement-healthz-bind-address: ""
   labels: ' k8s:app k8s:io\.cilium\.k8s\.namespace\.labels\.team k8s:io\.kubernetes\.pod\.namespace
     k8s:k8s-app io\.cilium\.k8s\.policy cybozu\.io/family app\.cybozu\.io neco\.cybozu\.io\/registry
     identity\.neco\.cybozu\.io '
-  mesh-auth-enabled: "true"
-  mesh-auth-gc-interval: 5m0s
-  mesh-auth-queue-size: "1024"
-  mesh-auth-rotated-identities-queue-size: "1024"
   metrics: +cilium_bpf_map_pressure
   monitor-aggregation: medium
   monitor-aggregation-flags: all
@@ -632,17 +606,9 @@ data:
   preallocate-bpf-maps: "false"
   procfs: /host/proc
   prometheus-serve-addr: :9962
-  proxy-connect-timeout: "2"
-  proxy-idle-timeout-seconds: "60"
-  proxy-max-connection-duration-seconds: "0"
-  proxy-max-requests-per-connection: "0"
   proxy-prometheus-port: "9964"
-  proxy-xff-num-trusted-hops-egress: "0"
-  proxy-xff-num-trusted-hops-ingress: "0"
   remove-cilium-node-taints: "true"
-  routing-mode: native
   set-cilium-is-up-condition: "true"
-  set-cilium-node-taints: "true"
   sidecar-istio-proxy-image: cilium/istio_proxy
   skip-cnp-status-startup-clean: "false"
   synchronize-k8s-nodes: "true"
@@ -651,7 +617,9 @@ data:
   tofqdns-endpoint-max-ip-per-hostname: "50"
   tofqdns-idle-connection-grace-period: 0s
   tofqdns-max-deferred-connection-deletes: "10000"
+  tofqdns-min-ttl: "3600"
   tofqdns-proxy-response-max-delay: 100ms
+  tunnel: disabled
   unmanaged-pod-watcher-interval: "15"
   vtep-cidr: ""
   vtep-endpoint: ""
@@ -665,10 +633,10 @@ metadata:
 apiVersion: v1
 data:
   config.yaml: "cluster-name: default\npeer-service: \"hubble-peer.kube-system.svc.cluster.local:443\"\nlisten-address:
-    :4245\ngops: true\ngops-port: \"9893\"\ndial-timeout: \nretry-timeout: \nsort-buffer-len-max:
-    \nsort-buffer-drain-timeout: \ntls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt\ntls-hubble-client-key-file:
-    /var/lib/hubble-relay/tls/client.key\ntls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\ntls-relay-server-cert-file:
-    /var/lib/hubble-relay/tls/server.crt\ntls-relay-server-key-file: /var/lib/hubble-relay/tls/server.key\n"
+    :4245\ndial-timeout: \nretry-timeout: \nsort-buffer-len-max: \nsort-buffer-drain-timeout:
+    \ntls-client-cert-file: /var/lib/hubble-relay/tls/client.crt\ntls-client-key-file:
+    /var/lib/hubble-relay/tls/client.key\ntls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\ntls-server-cert-file:
+    /var/lib/hubble-relay/tls/server.crt\ntls-server-key-file: /var/lib/hubble-relay/tls/server.key\n"
 kind: ConfigMap
 metadata:
   name: hubble-relay-config
@@ -754,13 +722,13 @@ spec:
       name: cilium-operator
   strategy:
     rollingUpdate:
-      maxSurge: 25%
-      maxUnavailable: 50%
+      maxSurge: 1
+      maxUnavailable: 1
     type: RollingUpdate
   template:
     metadata:
      annotations:
-        cilium.io/cilium-configmap-checksum: d5a6358f3358cdc61bf73eddd0be4f8a5b8909d0f95d0236cd095e308678a1a0
+        cilium.io/cilium-configmap-checksum: 6ce5254ae5e45c178f019621aa0bca076d336d1231fd90ddb8df2f77e2ebc667
         prometheus.io/port: "9963"
         prometheus.io/scrape: "true"
       labels:
@@ -804,7 +772,7 @@ spec:
           value: 127.0.0.1
         - name: KUBERNETES_SERVICE_PORT
           value: "16443"
-        image: ghcr.io/cybozu/cilium-operator-generic:1.14.14.1
+        image: ghcr.io/cybozu/cilium-operator-generic:1.13.16.1
         imagePullPolicy: IfNotPresent
         livenessProbe:
           httpGet:
@@ -821,16 +789,6 @@ spec:
           hostPort: 9963
           name: prometheus
           protocol: TCP
-        readinessProbe:
-          failureThreshold: 5
-          httpGet:
-            host: 127.0.0.1
-            path: /healthz
-            port: 9234
-            scheme: HTTP
-          initialDelaySeconds: 0
-          periodSeconds: 5
-          timeoutSeconds: 3
         resources:
           requests:
             cpu: 100m
@@ -881,7 +839,7 @@ spec:
   template:
     metadata:
       annotations:
-        cilium.io/hubble-relay-configmap-checksum: 021b54fa697399fbce31d464cf934ae4b921370cdcdcf3f98ca0a3d8a3201b76
+        cilium.io/hubble-relay-configmap-checksum: 121d3ca340f3623a68297728e72f60908cf197df412eb4bb266f449c1794a5a7
       labels:
         app.kubernetes.io/name: hubble-relay
         app.kubernetes.io/part-of: cilium
@@ -900,7 +858,7 @@ spec:
         - serve
         command:
         - hubble-relay
-        image: ghcr.io/cybozu/hubble-relay:1.14.14.1
+        image: ghcr.io/cybozu/hubble-relay:1.13.16.1
         imagePullPolicy: IfNotPresent
         livenessProbe:
           tcpSocket:
@@ -916,13 +874,6 @@ spec:
           requests:
             cpu: 100m
             memory: 200Mi
-        securityContext:
-          capabilities:
-            drop:
-            - ALL
-          runAsGroup: 10000
-          runAsNonRoot: true
-          runAsUser: 10000
         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
         - mountPath: /etc/hubble-relay
@@ -935,8 +886,6 @@ spec:
         kubernetes.io/os: linux
       priorityClassName: null
       restartPolicy: Always
-      securityContext:
-        fsGroup: 10000
       serviceAccount: hubble-relay
       serviceAccountName: hubble-relay
       terminationGracePeriodSeconds: 1
@@ -953,12 +902,12 @@ spec:
           sources:
           - secret:
               items:
+              - key: ca.crt
+                path: hubble-server-ca.crt
               - key: tls.crt
                 path: client.crt
               - key: tls.key
                 path: client.key
-              - key: ca.crt
-                path: hubble-server-ca.crt
               name: hubble-relay-client-certs
           - secret:
               items:
@@ -1001,7 +950,7 @@ spec:
         - --hubble-relay-server-cert-validity-duration=94608000s
         command:
         - /usr/bin/cilium-certgen
-        image: ghcr.io/cybozu/cilium-certgen:0.1.14.1
+        image: ghcr.io/cybozu/cilium-certgen:0.1.11.1
         imagePullPolicy: IfNotPresent
         name: certgen
       hostNetwork: true
@@ -1044,7 +993,7 @@ spec:
   template:
     metadata:
       annotations:
-        cilium.io/cilium-configmap-checksum: d5a6358f3358cdc61bf73eddd0be4f8a5b8909d0f95d0236cd095e308678a1a0
+        cilium.io/cilium-configmap-checksum: 6ce5254ae5e45c178f019621aa0bca076d336d1231fd90ddb8df2f77e2ebc667
         container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
         container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
         prometheus.io/port: "9962"
@@ -1080,11 +1029,23 @@ spec:
               fieldPath: metadata.namespace
         - name: CILIUM_CLUSTERMESH_CONFIG
           value: /var/lib/cilium/clustermesh/
+        - name: CILIUM_CNI_CHAINING_MODE
+          valueFrom:
+            configMapKeyRef:
+              key: cni-chaining-mode
+              name: cilium-config
+              optional: true
+        - name: CILIUM_CUSTOM_CNI_CONF
+          valueFrom:
+            configMapKeyRef:
+              key: custom-cni-conf
+              name: cilium-config
+              optional: true
         - name: KUBERNETES_SERVICE_HOST
           value: 127.0.0.1
         - name: KUBERNETES_SERVICE_PORT
           value: "16443"
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         lifecycle:
           postStart:
             exec:
               command:
               - bash
               - -c
               - |
-                set -o errexit
-                set -o pipefail
-                set -o nounset
-
-                # When running in AWS ENI mode, it's likely that 'aws-node' has
-                # had a chance to install SNAT iptables rules. These can result
-                # in dropped traffic, so we should attempt to remove them.
-                # We do it using a 'postStart' hook since this may need to run
-                # for nodes which might have already been init'ed but may still
-                # have dangling rules. This is safe because there are no
-                # dependencies on anything that is part of the startup script
-                # itself, and can be safely run multiple times per node (e.g. in
-                # case of a restart).
-                if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
-                then
-                    echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
-                    iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
-                fi
-                echo 'Done!'
+                /cni-install.sh --enable-debug=false --cni-exclusive=true --log-file=/var/run/cilium/cilium-cni.log
           preStop:
             exec:
               command:
@@ -1243,50 +1186,20 @@ spec:
           value: 127.0.0.1
         - name: KUBERNETES_SERVICE_PORT
           value: "16443"
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: config
         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
         - mountPath: /tmp
           name: tmp
-      - command:
-        - sh
-        - -ec
-        - |
-          cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
-          nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
-          rm /hostbin/cilium-sysctlfix
-        env:
-        - name: BIN_PATH
-          value: /opt/cni/bin
-        image: ghcr.io/cybozu/cilium:1.14.14.1
-        imagePullPolicy: IfNotPresent
-        name: apply-sysctl-overwrites
-        securityContext:
-          capabilities:
-            add:
-            - SYS_ADMIN
-            - SYS_CHROOT
-            - SYS_PTRACE
-            drop:
-            - ALL
-          seLinuxOptions:
-            level: s0
-            type: spc_t
-        terminationMessagePolicy: FallbackToLogsOnError
-        volumeMounts:
-        - mountPath: /hostproc
-          name: hostproc
-        - mountPath: /hostbin
-          name: cni-path
       - args:
         - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
         command:
         - /bin/bash
         - -c
         - --
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: mount-bpf-fs
         securityContext:
@@ -1315,7 +1228,7 @@ spec:
           value: 127.0.0.1
         - name: KUBERNETES_SERVICE_PORT
           value: "16443"
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: clean-cilium-state
         securityContext:
@@ -1341,7 +1254,7 @@ spec:
           name: cilium-run
       - command:
         - /install-plugin.sh
-        image: ghcr.io/cybozu/cilium:1.14.14.1
+        image: ghcr.io/cybozu/cilium:1.13.16.4
         imagePullPolicy: IfNotPresent
         name: install-cni-binaries
         resources:
@@ -1379,10 +1292,6 @@ spec:
           path: /sys/fs/bpf
           type: DirectoryOrCreate
         name: bpf-maps
-      - hostPath:
-          path: /proc
-          type: Directory
-        name: hostproc
       - hostPath:
           path: /sys/fs/cgroup
           type: DirectoryOrCreate
@@ -1403,22 +1312,10 @@ spec:
           type: FileOrCreate
         name: xtables-lock
       - name: clustermesh-secrets
-        projected:
+        secret:
           defaultMode: 256
-          sources:
-          - secret:
-              name: cilium-clustermesh
-              optional: true
-          - secret:
-              items:
-              - key: tls.key
-                path: common-etcd-client.key
-              - key: tls.crt
-                path: common-etcd-client.crt
-              - key: ca.crt
-                path: common-etcd-client-ca.crt
-              name: clustermesh-apiserver-remote-cert
-              optional: true
+          optional: true
+          secretName: cilium-clustermesh
       - configMap:
           name: bgp-config
         name: bgp-config-path
@@ -1436,29 +1333,25 @@ spec:
           sources:
           - secret:
              items:
+              - key: ca.crt
+                path: client-ca.crt
               - key: tls.crt
                 path: server.crt
               - key: tls.key
                 path: server.key
-              - key: ca.crt
-                path: client-ca.crt
               name: hubble-server-certs
               optional: true
   updateStrategy:
-    rollingUpdate:
-      maxUnavailable: 1
-    type: RollingUpdate
+    type: OnDelete
 ---
 apiVersion: batch/v1
 kind: Job
 metadata:
-  annotations:
-    helm.sh/hook: post-install,post-upgrade
   labels:
     app.kubernetes.io/name: hubble-generate-certs
     app.kubernetes.io/part-of: cilium
     k8s-app: hubble-generate-certs
-  name: hubble-generate-certs
+  name: hubble-generate-certs-4b23ed05ea
   namespace: kube-system
 spec:
   template:
@@ -1481,7 +1374,7 @@ spec:
         - --hubble-relay-server-cert-validity-duration=94608000s
         command:
         - /usr/bin/cilium-certgen
-        image: ghcr.io/cybozu/cilium-certgen:0.1.14.1
+        image: ghcr.io/cybozu/cilium-certgen:0.1.11.1
         imagePullPolicy: IfNotPresent
         name: certgen
       hostNetwork: true