From 4492122d0cfa221035818adbc01f785de66a1415 Mon Sep 17 00:00:00 2001
From: bingoct
Date: Tue, 7 Nov 2023 14:53:41 +0800
Subject: [PATCH] feat: add config for crictl
 feat: clean up remaining virtual network interfaces, containers and backup mounts
 fix: bap uses K8S_CTRL_IP as rs by default
 fix: clean_node explicitly specifies the CRI socket
 fix: explicitly declare the sock scheme
 fix: kubelet explicitly specifies cgroupDriver as systemd
 fix: controlplane restarts kubelet and waits for it to come up
 style: shell format
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 bcs-ops/clean_master.sh           |   4 +-
 bcs-ops/clean_node.sh             |  42 +++++---
 bcs-ops/env/offline-manifest.yaml |   6 +-
 bcs-ops/functions/k8s.sh          |  21 +++-
 bcs-ops/install_master.sh         |   2 +
 bcs-ops/install_node.sh           |   2 +-
 bcs-ops/install_op.sh             |   2 +-
 bcs-ops/k8s/install_containerd    |   2 +-
 bcs-ops/k8s/install_docker        |   2 +-
 bcs-ops/k8s/install_k8s_tools     |  44 +++++++-
 bcs-ops/k8s/operate_completion    |  14 ++-
 bcs-ops/k8s/optimize_k8s          | 170 ++++++++++++++----------
 bcs-ops/k8s/render_kubeadm        |  15 ++-
 13 files changed, 197 insertions(+), 129 deletions(-)
 mode change 100644 => 100755 bcs-ops/env/offline-manifest.yaml
 mode change 100644 => 100755 bcs-ops/k8s/optimize_k8s

diff --git a/bcs-ops/clean_master.sh b/bcs-ops/clean_master.sh
index 2dfa695320..a17f3c7994 100755
--- a/bcs-ops/clean_master.sh
+++ b/bcs-ops/clean_master.sh
@@ -17,7 +17,7 @@ ROOT_DIR=${SELF_DIR}
 readonly SELF_DIR ROOT_DIR
 
 # only 1.2[0-1] to run
-kubeadm reset phase update-cluster-status || true
-kubeadm reset phase remove-etcd-member
+kubeadm reset phase update-cluster-status --v=5 || true
+kubeadm reset phase remove-etcd-member --v=5
 
 "${ROOT_DIR}"/clean_node.sh
diff --git a/bcs-ops/clean_node.sh b/bcs-ops/clean_node.sh
index e7d1a11912..6159ca5d3f 100755
--- a/bcs-ops/clean_node.sh
+++ b/bcs-ops/clean_node.sh
@@ -18,20 +18,6 @@ ROOT_DIR=${SELF_DIR}
 
 readonly BAK_DIR SELF_DIR ROOT_DIR
 
-clean_container() {
-  case "${CRI_TYPE,,}" in
-    "containerd")
-      ctr -n k8s.io t ls | grep -qv PID && ctr -n k8s.io t rm -f "$(ctr -n k8s.io t ls -q)"
-      ctr -n k8s.io c ls | grep -qv CONTAINER && ctr -n k8s.io c rm "$(ctr -n k8s.io c ls -q)"
-      systemctl disable --now containerd
-      ;;
-    "docker")
-      docker ps | grep -qv NAME && docker rm -f "$(docker ps -aq)"
-      systemctl disable --now docker
-      ;;
-  esac
-}
-
 safe_source() {
   local source_file=$1
   if [[ -f ${source_file} ]]; then
@@ -48,8 +34,30 @@ for file in "${source_files[@]}"; do
   safe_source "$file"
 done
 
+clean_container() {
+  crictl ps -aq | xargs -r crictl rm -f
+}
+
+clean_cni() {
+  case ${K8S_CNI} in
+    "flannel")
+      ip l | awk '/flannel/{eth=$2;gsub(":","",eth);print eth}' | xargs -r -n 1 ip l d
+      ;;
+    *)
+      return 0
+      ;;
+  esac
+  rm -rf /etc/cni/net.d/*
+}
+
+clean_vni() {
+  ip l | awk '/cni0|kube-ipvs0/{eth=$2;gsub(":","",eth);print eth}' | xargs -r -n 1 ip l d
+  ip l | awk '/veth/{eth=$2;split(eth,a,"@");print a[1]}' | xargs -r -n 1 ip l d
+}
+
+kubeadm reset phase cleanup-node \
+  --cri-socket "$(crictl config --get runtime-endpoint)" --v=5
 systemctl disable --now kubelet
-kubeadm reset phase cleanup-node
 clean_container
 
 ip l d cni0 || utils::log "WARN" "link cni0 does not exist"
@@ -79,3 +87,7 @@ utils::log "OK" "Back Files >>> Done"
 
 "${ROOT_DIR}"/system/config_iptables.sh clean \
   && utils::log "OK" "Clean k8s-components iptables rules"
iptables rules" + +df -h | awk '/backup.*kubelet/{print $NF}' | xargs -r umount +clean_cni +clean_vni diff --git a/bcs-ops/env/offline-manifest.yaml b/bcs-ops/env/offline-manifest.yaml old mode 100644 new mode 100755 index ce06ec5e3c..728cc5ea0a --- a/bcs-ops/env/offline-manifest.yaml +++ b/bcs-ops/env/offline-manifest.yaml @@ -3,7 +3,7 @@ bcs-ops: bin-tools: k8s: "1.20.15" cni-plugins: "1.3.0" - crictl: "1.26.0" + crictl: "1.24.2" containerd: "1.6.21" runc: "1.1.8" docker: "19.03.9" @@ -42,7 +42,7 @@ bcs-ops: bin-tools: k8s: "1.23.17" cni-plugins: "1.3.0" - crictl: "1.26.0" + crictl: "1.24.2" containerd: "1.6.21" runc: "1.1.8" docker: "19.03.9" @@ -80,7 +80,7 @@ bcs-ops: bin-tools: k8s: "1.24.15" cni-plugins: "1.3.0" - crictl: "1.26.0" + crictl: "1.24.2" containerd: "1.6.21" runc: "1.1.8" jq: "1.6" diff --git a/bcs-ops/functions/k8s.sh b/bcs-ops/functions/k8s.sh index 404c41ab1c..4d2208375b 100644 --- a/bcs-ops/functions/k8s.sh +++ b/bcs-ops/functions/k8s.sh @@ -39,9 +39,9 @@ k8s::safe_add_helmrepo() { utils::log "INFO" "remove old helm repo: $repo_name" helm repo remove "$repo_name" fi - if ! helm repo add "$repo_name" "$repo_url";then - utils::log "ERROR" "can't add helm repo $repo_name $repo_url" - return 1 + if ! helm repo add "$repo_name" "$repo_url"; then + utils::log "ERROR" "can't add helm repo $repo_name $repo_url" + return 1 fi helm repo list if ! helm repo update; then @@ -98,3 +98,18 @@ k8s::restart_kubelet() { utils::log "ERROR" "restart kubelet service failed" return 1 } + +k8s::check_master() { + local timeout=5 + while ((timeout > 0)); do + if ! kubectl cluster-info; then + utils::log "WARN" "timeout=$timeout, \ +controlplane has not been started yet, waiting" + crictl ps + else + return 0 + fi + sleep 30 + done + return 1 +} diff --git a/bcs-ops/install_master.sh b/bcs-ops/install_master.sh index 0d477c49e3..c35794a0ad 100755 --- a/bcs-ops/install_master.sh +++ b/bcs-ops/install_master.sh @@ -100,6 +100,8 @@ else "${ROOT_DIR}"/system/config_envfile.sh -c clean K8S_CTRL_IP=$LAN_IP "${ROOT_DIR}"/system/config_envfile.sh -c init k8s::restart_kubelet + sleep 30 + k8s::check_master if [[ ${ENABLE_APISERVER_HA} == "true" ]]; then [[ -z ${VIP} ]] && utils::log "ERROR" "apiserver HA is enabled but VIP is not set" if [[ ${APISERVER_HA_MODE} == "kube-vip" ]]; then diff --git a/bcs-ops/install_node.sh b/bcs-ops/install_node.sh index 77cd5ecbe0..64adbf76b2 100755 --- a/bcs-ops/install_node.sh +++ b/bcs-ops/install_node.sh @@ -95,7 +95,7 @@ init_bap_rule() { esac [[ -z "${VIP}" ]] && utils::log "ERROR" "apiserver HA is enabled but VIP is not set" - "${PROXY_TOOL_PATH}"/bcs-apiserver-proxy-tools -cmd init -vs "${VIP}":"${VS_PORT}" -rs "${LAN_IP}":6443 \ + "${PROXY_TOOL_PATH}"/bcs-apiserver-proxy-tools -cmd init -vs "${VIP}":"${VS_PORT}" -rs "${K8S_CTRL_IP}":6443 \ -scheduler "${LVS_SCHEDULER}" -toolPath "${PROXY_TOOL_PATH}"/bcs-apiserver-proxy-tools "${ROOT_DIR}"/system/config_bcs_dns -u "${VIP}" k8s-api.bcs.local k8s::restart_kubelet diff --git a/bcs-ops/install_op.sh b/bcs-ops/install_op.sh index b06fab861d..a9bb0191fc 100755 --- a/bcs-ops/install_op.sh +++ b/bcs-ops/install_op.sh @@ -35,7 +35,7 @@ for file in "${source_files[@]}"; do safe_source "$file" done -"${ROOT_DIR}"/k8s/operate_completion kubeadm kubectl helm ctr yq +"${ROOT_DIR}"/k8s/operate_completion kubeadm kubectl helm ctr yq crictl if [[ -n "${BKREPO_URL:-}" ]]; then if command -v helm &>/dev/null; then diff --git a/bcs-ops/k8s/install_containerd b/bcs-ops/k8s/install_containerd index 34447663a1..1033066574 100755 --- 
a/bcs-ops/k8s/install_containerd +++ b/bcs-ops/k8s/install_containerd @@ -225,7 +225,7 @@ main() { fi if [[ -n ${BCS_OFFLINE:-} ]]; then - find "${ROOT_DIR}"/version-"${VERSION}"/images -name '*.tar' -type f -print0 \ + find "${ROOT_DIR}"/version-"${K8S_VER}"/images -name '*.tar' -type f -print0 \ | xargs -0 -I {} ctr -n k8s.io image import {} fi diff --git a/bcs-ops/k8s/install_docker b/bcs-ops/k8s/install_docker index 7f50f90bd0..e12517b0c1 100755 --- a/bcs-ops/k8s/install_docker +++ b/bcs-ops/k8s/install_docker @@ -207,7 +207,7 @@ main() { # bcs_offline load image if [[ -n ${BCS_OFFLINE:-} ]]; then - find "${ROOT_DIR}"/version-"${VERSION}"/images -name '*.tar' -type f -print0 \ + find "${ROOT_DIR}"/version-"${K8S_VER}"/images -name '*.tar' -type f -print0 \ | xargs -0 -I {} docker load -i {} fi diff --git a/bcs-ops/k8s/install_k8s_tools b/bcs-ops/k8s/install_k8s_tools index 5cacb51bb4..0bc64c733b 100755 --- a/bcs-ops/k8s/install_k8s_tools +++ b/bcs-ops/k8s/install_k8s_tools @@ -69,7 +69,13 @@ _yum_k8s() { | awk '/'"$pkg_pat"'/{last=$2} END{sub(/.*:/,"",last);print last}')" [[ -n $pkg_ver ]] \ || utils::log "ERROR" "${K8S_VER:-} not found amongst yum list results" - yum install -y "kubeadm-${pkg_ver}" "kubelet-${pkg_ver}" "kubectl-${pkg_ver}" + local crictl_pat crictl_ver + crictl_pat=$(awk '/crictl:/{gsub("\"","",$2);print $2;exit}' "${ROOT_DIR}"/env/offline-manifest.yaml) + + crictl_ver=$(yum list cri-tools --showduplicates \ + | awk '/'"${crictl_pat}"'/{last=$2} END{sub(/.*:/,"",last);print last}') + + yum install -y "kubeadm-${pkg_ver}" "kubelet-${pkg_ver}" "kubectl-${pkg_ver}" "cri-tools-${crictl_ver}" } _curl_k8s() { @@ -78,7 +84,9 @@ _curl_k8s() { mkdir -p "$bin_path" name="k8s" - ver=$(awk '/version: \"'"${K8S_VER}"'\"/{f=1;next} f && /'"${name}"':/{gsub("\"","",$2);print $2;exit}' "${ROOT_DIR}"/env/offline-manifest.yaml) + ver=$(awk \ + '/version: \"'"${K8S_VER}"'\"/{f=1;next} f && /'"${name}"':/{gsub("\"","",$2);print $2;exit}' \ + "${ROOT_DIR}"/env/offline-manifest.yaml) file="${name}-${ver}.tgz" url=${REPO_URL:-}/${file} if curl -sSfL "${url}" -o "${bin_path}/${file}" -m "360"; then @@ -88,7 +96,9 @@ _curl_k8s() { fi name="crictl" - ver=$(awk '/version: \"'"${K8S_VER}"'\"/{f=1;next} f && /'"${name}"':/{gsub("\"","",$2);print $2;exit}' "${ROOT_DIR}"/env/offline-manifest.yaml) + ver=$(awk \ + '/version: \"'"${K8S_VER}"'\"/{f=1;next} f && /'"${name}"':/{gsub("\"","",$2);print $2;exit}' \ + "${ROOT_DIR}"/env/offline-manifest.yaml) file="${name}-${ver}.tgz" url="${REPO_URL}/${file}" if curl -sSfL "${url}" -o "${bin_path}/${file}" -m "360"; then @@ -98,7 +108,9 @@ _curl_k8s() { fi name="cni-plugins" - ver=$(awk '/version: \"'"${K8S_VER}"'\"/{f=1;next} f && /'"${name}"':/{gsub("\"","",$2);print $2;exit}' "${ROOT_DIR}"/env/offline-manifest.yaml) + ver=$(awk \ + '/version: \"'"${K8S_VER}"'\"/{f=1;next} f && /'"${name}"':/{gsub("\"","",$2);print $2;exit}' \ + "${ROOT_DIR}"/env/offline-manifest.yaml) file="${name}-${ver}.tgz" url="${REPO_URL}/${file}" if curl -sSfL "${url}" -o "${bin_path}/${file}" -m "360"; then @@ -141,6 +153,28 @@ _offline_k8s() { fi } +config_crictl() { + local sock config_file + config_file="/etc/crictl.yaml" + [[ -f $config_file ]] || touch $config_file + case "${CRI_TYPE,,}" in + "docker") + sock="unix:///var/run/dockershim.sock" + ;; + "containerd") + sock="unix:///run/containerd/containerd.sock" + ;; + *) + # ToDo: Unified standard error code + export ERR_CODE=1 + utils::log FATAL "unkown CRI_TYPE:$CRI_TYPE" + ;; + esac + crictl config runtime-endpoint $sock + 
crictl config image-endpoint $sock + "${ROOT_DIR}"/k8s/operate_completion crictl +} + main() { local source_files source_files=("${ROOT_DIR}/functions/utils.sh" "${ROOT_DIR}/env/bcs.env") @@ -164,6 +198,8 @@ main() { esac fi + config_crictl + utils::log "INFO" "check kubeadm status" if kubeadm version -o short; then utils::log "OK" "kubeadm install success" diff --git a/bcs-ops/k8s/operate_completion b/bcs-ops/k8s/operate_completion index e01aa42e3f..0421258189 100755 --- a/bcs-ops/k8s/operate_completion +++ b/bcs-ops/k8s/operate_completion @@ -22,7 +22,7 @@ ROOT_DIR="${SELF_DIR}/.." RC_FILE="/etc/bash_completion.d/bcs.bash" -PROJECTS=(kubeadm kubectl helm ctr yq) +PROJECTS=(kubeadm kubectl crictl helm ctr yq) readonly SELF_DIR ROOT_DIR RC_FILE PROJECTS usage_and_exit() { @@ -140,6 +140,18 @@ source <(kubeadm completion bash) EOF } +completion_crictl() { + check_completion + sed -ri \ + '/bcs config begin for crictl/,/bcs config end for crictl/d' ${RC_FILE} + cat >>"$RC_FILE" <<'EOF' +# bcs config begin for crictl +# crictl 命令补全 +source <(crictl completion bash) +# bcs config end for crictl +EOF +} + safe_source() { local source_file=$1 if [[ -f ${source_file} ]]; then diff --git a/bcs-ops/k8s/optimize_k8s b/bcs-ops/k8s/optimize_k8s old mode 100644 new mode 100755 index a9f55642ad..12f5424ddf --- a/bcs-ops/k8s/optimize_k8s +++ b/bcs-ops/k8s/optimize_k8s @@ -30,153 +30,137 @@ safe_source() { fi } - -source_files=("${ROOT_DIR}/functions/utils.sh" "${ROOT_DIR}/functions/k8s.sh" "${ROOT_DIR}/env/bcs.env") +source_files=("${ROOT_DIR}/functions/utils.sh" "${ROOT_DIR}/functions/k8s.sh" + "${ROOT_DIR}/env/bcs.env") for file in "${source_files[@]}"; do safe_source "$file" done #start configuration -goversion=`kubectl version|grep "Server Version:"|grep -E "go[0-9]{1}.[0-9]{2}" -o|awk -F'.' ' { print $2 } '` -if [ -z "${goversion}" ];then +goversion=$(kubectl version -o yaml | yq '.serverVersion.goVersion' \ + | awk -F'.' '{print $2}') +if [ -z "${goversion}" ]; then job_fail "get go version failed, configure etcd failed" fi tmp_dir="/tmp/backup/$(date +%s)" -mkdir -p ${tmp_dir} -cp /etc/kubernetes/manifests/* ${tmp_dir}/ -cp /etc/kubernetes/manifests/* ${ROOT_DIR}/ -pod_files=(etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml) - -for pod_file in ${pod_files[@]};do - if [[ ${goversion} -le 15 ]] || [[ ${goversion} -ge 12 ]];then - if ! grep GODEBUG ${ROOT_DIR}/${pod_file};then - if [[ $(yq '.spec.containers[0].env' ${ROOT_DIR}/${pod_file}) != "null" ]];then - env_length=$(yq '.spec.containers[0].env|to_entries|length' ${ROOT_DIR}/${pod_file}) - yq -i '.spec.containers[0].env['${env_length}']={"name":"GODEBUG", "value":"madvdontneed=1"}' ${ROOT_DIR}/${pod_file} +mkdir -p "${tmp_dir}" +cp /etc/kubernetes/manifests/* "${tmp_dir}"/ +cp /etc/kubernetes/manifests/* "${ROOT_DIR}"/ +pod_files=(etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml + kube-scheduler.yaml) + +for pod_file in "${pod_files[@]}"; do + if ((goversion <= 15)) || ((goversion >= 12)); then + if ! 
grep GODEBUG "${ROOT_DIR}/${pod_file}"; then + if [[ $(yq '.spec.containers[0].env' "${ROOT_DIR}/${pod_file}") != "null" ]]; then + env_length=$(yq '.spec.containers[0].env|to_entries|length' "${ROOT_DIR}/${pod_file}") + yq -i '.spec.containers[0].env['"${env_length}"']={"name":"GODEBUG", "value":"madvdontneed=1"}' "${ROOT_DIR}/${pod_file}" else - yq -i '.spec.containers[0].env[0]={"name":"GODEBUG", "value":"madvdontneed=1"}' ${ROOT_DIR}/${pod_file} + yq -i '.spec.containers[0].env[0]={"name":"GODEBUG", "value":"madvdontneed=1"}' "${ROOT_DIR}/${pod_file}" fi fi fi done -if ! grep -v "^#" ${ROOT_DIR}/kube-apiserver.yaml|grep max-mutating-requests-inflight;then - yq -i '.spec.containers[0].command += "--max-mutating-requests-inflight=1000"' ${ROOT_DIR}/kube-apiserver.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-apiserver.yaml | grep max-mutating-requests-inflight; then + yq -i '.spec.containers[0].command += "--max-mutating-requests-inflight=1000"' \ + "${ROOT_DIR}"/kube-apiserver.yaml else - if ! grep max-mutating-requests-inflight=1000 ${ROOT_DIR}/kube-apiserver.yaml;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-apiserver.yaml|yq '.[]|select (.value|test("max-mutating-requests-inflight")).key') - yq -i '.spec.containers[0].command['${element_index}']="--max-mutating-requests-inflight=1000"' ${ROOT_DIR}/kube-apiserver.yaml - fi + if ! grep max-mutating-requests-inflight=1000 "${ROOT_DIR}"/kube-apiserver.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' \ + "${ROOT_DIR}"/kube-apiserver.yaml \ + | yq '.[]|select (.value|test("max-mutating-requests-inflight")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--max-mutating-requests-inflight=1000"' "${ROOT_DIR}"/kube-apiserver.yaml + fi fi -if ! grep -v "^#" ${ROOT_DIR}/kube-apiserver.yaml |grep max-requests-inflight;then - yq -i '.spec.containers[0].command += "--max-requests-inflight=3000"' ${ROOT_DIR}/kube-apiserver.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-apiserver.yaml | grep max-requests-inflight; then + yq -i '.spec.containers[0].command += "--max-requests-inflight=3000"' "${ROOT_DIR}"/kube-apiserver.yaml else - if ! grep max-requests-inflight=3000 ${ROOT_DIR}/kube-apiserver.yaml ;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-apiserver.yaml|yq '.[]|select (.value|test("max-mutating-requests-inflight")).key') - yq -i '.spec.containers[0].command['${element_index}']="--max-requests-inflight=3000"' ${ROOT_DIR}/kube-apiserver.yaml - fi + if ! grep max-requests-inflight=3000 "${ROOT_DIR}"/kube-apiserver.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' "${ROOT_DIR}"/kube-apiserver.yaml | yq '.[]|select (.value|test("max-mutating-requests-inflight")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--max-requests-inflight=3000"' "${ROOT_DIR}"/kube-apiserver.yaml + fi fi -if ! grep -v "^#" ${ROOT_DIR}/kube-controller-manager.yaml|grep kube-api-qps;then - yq -i '.spec.containers[0].command += "--kube-api-qps=300"' ${ROOT_DIR}/kube-controller-manager.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-controller-manager.yaml | grep kube-api-qps; then + yq -i '.spec.containers[0].command += "--kube-api-qps=300"' "${ROOT_DIR}"/kube-controller-manager.yaml else - if ! 
grep kube-api-qps=300 ${ROOT_DIR}/kube-controller-manager.yaml ;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-controller-manager.yaml|yq '.[]|select (.value|test("kube-api-qps")).key') - yq -i '.spec.containers[0].command['${element_index}']="--kube-api-qps=300"' ${ROOT_DIR}/kube-controller-manager.yaml - fi + if ! grep kube-api-qps=300 "${ROOT_DIR}"/kube-controller-manager.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' "${ROOT_DIR}"/kube-controller-manager.yaml | yq '.[]|select (.value|test("kube-api-qps")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--kube-api-qps=300"' "${ROOT_DIR}"/kube-controller-manager.yaml + fi fi -if ! grep -v "^#" ${ROOT_DIR}/kube-controller-manager.yaml |grep kube-api-burst;then - yq -i '.spec.containers[0].command += "--kube-api-burst=400"' ${ROOT_DIR}/kube-controller-manager.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-controller-manager.yaml | grep kube-api-burst; then + yq -i '.spec.containers[0].command += "--kube-api-burst=400"' "${ROOT_DIR}"/kube-controller-manager.yaml else - if ! grep kube-api-burst=400 ${ROOT_DIR}/kube-controller-manager.yaml;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-controller-manager.yaml|yq '.[]|select (.value|test("kube-api-burst")).key') - yq -i '.spec.containers[0].command['${element_index}']="--kube-api-burst=400"' ${ROOT_DIR}/kube-controller-manager.yaml - fi + if ! grep kube-api-burst=400 "${ROOT_DIR}"/kube-controller-manager.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' "${ROOT_DIR}"/kube-controller-manager.yaml | yq '.[]|select (.value|test("kube-api-burst")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--kube-api-burst=400"' "${ROOT_DIR}"/kube-controller-manager.yaml + fi fi -if ! grep -v "^#" ${ROOT_DIR}/kube-controller-manager.yaml|grep terminated-pod-gc-threshold;then - yq -i '.spec.containers[0].command += "--terminated-pod-gc-threshold=12500"' ${ROOT_DIR}/kube-controller-manager.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-controller-manager.yaml | grep terminated-pod-gc-threshold; then + yq -i '.spec.containers[0].command += "--terminated-pod-gc-threshold=12500"' "${ROOT_DIR}"/kube-controller-manager.yaml else - if ! grep terminated-pod-gc-threshold=12500 ${ROOT_DIR}/kube-controller-manager.yaml;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-controller-manager.yaml|yq '.[]|select (.value|test("terminated-pod-gc-threshold")).key') - yq -i '.spec.containers[0].command['${element_index}']="--terminated-pod-gc-threshold=12500"' ${ROOT_DIR}/kube-controller-manager.yaml - fi + if ! grep terminated-pod-gc-threshold=12500 "${ROOT_DIR}"/kube-controller-manager.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' "${ROOT_DIR}"/kube-controller-manager.yaml | yq '.[]|select (.value|test("terminated-pod-gc-threshold")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--terminated-pod-gc-threshold=12500"' "${ROOT_DIR}"/kube-controller-manager.yaml + fi fi -if ! grep -v "^#" ${ROOT_DIR}/kube-scheduler.yaml|grep kube-api-qps;then - yq -i '.spec.containers[0].command += "--kube-api-qps=300"' ${ROOT_DIR}/kube-scheduler.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-scheduler.yaml | grep kube-api-qps; then + yq -i '.spec.containers[0].command += "--kube-api-qps=300"' "${ROOT_DIR}"/kube-scheduler.yaml else - if ! 
grep kube-api-qps=300 ${ROOT_DIR}/kube-scheduler.yaml;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-scheduler.yaml|yq '.[]|select (.value|test("kube-api-qps")).key') - yq -i '.spec.containers[0].command['${element_index}']="--kube-api-qps=300"' ${ROOT_DIR}/kube-scheduler.yaml - fi + if ! grep kube-api-qps=300 "${ROOT_DIR}"/kube-scheduler.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' "${ROOT_DIR}"/kube-scheduler.yaml | yq '.[]|select (.value|test("kube-api-qps")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--kube-api-qps=300"' "${ROOT_DIR}"/kube-scheduler.yaml + fi fi -if ! grep -v "^#" ${ROOT_DIR}/kube-scheduler.yaml |grep kube-api-burst;then - yq -i '.spec.containers[0].command += "--kube-api-burst=400"' ${ROOT_DIR}/kube-scheduler.yaml +if ! grep -v "^#" "${ROOT_DIR}"/kube-scheduler.yaml | grep kube-api-burst; then + yq -i '.spec.containers[0].command += "--kube-api-burst=400"' "${ROOT_DIR}"/kube-scheduler.yaml else - if ! grep kube-api-burst=400 ${ROOT_DIR}/kube-scheduler.yaml;then - element_index=$(yq '.spec.containers[0].command|to_entries' ${ROOT_DIR}/kube-scheduler.yaml|yq '.[]|select (.value|test("kube-api-burst")).key') - yq -i '.spec.containers[0].command['${element_index}']="--kube-api-burst=400"' ${ROOT_DIR}/kube-scheduler.yaml - fi + if ! grep kube-api-burst=400 "${ROOT_DIR}"/kube-scheduler.yaml; then + element_index=$(yq '.spec.containers[0].command|to_entries' "${ROOT_DIR}"/kube-scheduler.yaml | yq '.[]|select (.value|test("kube-api-burst")).key') + yq -i '.spec.containers[0].command['"${element_index}"']="--kube-api-burst=400"' "${ROOT_DIR}"/kube-scheduler.yaml + fi fi -for pod_file in ${pod_files[@]};do - cp ${ROOT_DIR}/${pod_file} /etc/kubernetes/manifests/ +for pod_file in "${pod_files[@]}"; do + cp "${ROOT_DIR}/${pod_file}" /etc/kubernetes/manifests/ done k8s::restart_kubelet sleep 30 -pods=(etcd kube-apiserver kube-controller-manager kube-scheduler) -for pod in ${pods[@]};do - case "${CRI_TYPE,,}" in - "docker") - if ! docker ps |grep -v pause|grep ${pod}|grep -i Up;then - utils::log "ERROR" "${pod} fail to run " - fi - ;; - "containerd") - if ! 
crictl --runtime-endpoint=unix:///run/containerd/containerd.sock ps \ - | grep "${pod}" \ - | grep -i running; then - utils::log "ERROR" "${pod} fail to run " - fi - ;; - *) - export ERR_CODE=1 - utils::log "FATAL" "unkown CRI_TYPE: $CRI_TYPE" - ;; - esac -done +k8s::check_master if [[ -z ${MASTER_JOIN_CMD:-} ]]; then - kubectl get cm -n kube-system kube-proxy -o yaml > ${tmp_dir}/kube-proxy-cm.yaml - kubectl get ds -n kube-system kube-proxy -o yaml > ${tmp_dir}/kube-proxy.yaml + kubectl get cm -n kube-system kube-proxy -o yaml >"${tmp_dir}"/kube-proxy-cm.yaml + kubectl get ds -n kube-system kube-proxy -o yaml >"${tmp_dir}"/kube-proxy.yaml - kubectl get cm -n kube-system kube-proxy -o yaml|yq '.data.["kubeconfig.conf"]' > ${ROOT_DIR}/kubeconfig.conf - kubectl get cm -n kube-system kube-proxy -o yaml|yq '.data.["config.conf"]'|yq '.ipvs.udpTimeout="10s"' > ${ROOT_DIR}/config.conf + kubectl get cm -n kube-system kube-proxy -o yaml | yq '.data.["kubeconfig.conf"]' >"${ROOT_DIR}"/kubeconfig.conf + kubectl get cm -n kube-system kube-proxy -o yaml | yq '.data.["config.conf"]' | yq '.ipvs.udpTimeout="10s"' >"${ROOT_DIR}"/config.conf kubectl delete cm kube-proxy -n kube-system - kubectl create cm kube-proxy -n kube-system --from-file config.conf --from-file kubeconfig.conf + kubectl create cm kube-proxy -n kube-system --from-file "${ROOT_DIR}"/config.conf --from-file "${ROOT_DIR}"/kubeconfig.conf - if ! kubectl get ds -n kube-system kube-proxy -o yaml|grep madvdontneed;then - kubectl patch ds -n kube-system kube-proxy -p '[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", "value":{"name":"GODEBUG", "value":"madvdontneed=1"}}]' --type json + if ! kubectl get ds -n kube-system kube-proxy -o yaml | grep madvdontneed; then + kubectl patch ds -n kube-system kube-proxy -p '[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", "value":{"name":"GODEBUG", "value":"madvdontneed=1"}}]' --type json else - if ! kubectl get ds -n kube-system kube-proxy -o yaml|grep madvdontneed=1;then - element_index=`kubectl get ds -n kube-system kube-proxy -o json|jq '.spec.template.spec.containers[0].env|to_entries[]|select (.value.name|test("GODEBUG")).key'` - kubectl patch ds -n kube-system kube-proxy -p '[{"op": "replace", "path": "/spec/template/spec/containers/0/env/'${element_index}'", "value":{"name":"GODEBUG", "value":"madvdontneed=1"}}]' --type json + if ! kubectl get ds -n kube-system kube-proxy -o yaml | grep madvdontneed=1; then + element_index=$(kubectl get ds -n kube-system kube-proxy -o json | jq '.spec.template.spec.containers[0].env|to_entries[]|select (.value.name|test("GODEBUG")).key') + kubectl patch ds -n kube-system kube-proxy -p '[{"op": "replace", "path": "/spec/template/spec/containers/0/env/'"${element_index}"'", "value":{"name":"GODEBUG", "value":"madvdontneed=1"}}]' --type json fi fi kubectl rollout restart ds -n kube-system kube-proxy - if ! kubectl rollout status ds -n kube-system kube-proxy --timeout 60s;then + if ! kubectl rollout status ds -n kube-system kube-proxy --timeout 60s; then utils::log "FATAL" "Update kube-proxy failed." fi fi - #coredns configuration utils::log "OK" "K8S configuration done!" 
diff --git a/bcs-ops/k8s/render_kubeadm b/bcs-ops/k8s/render_kubeadm
index fdbc5d9a93..e22f56b3e1 100755
--- a/bcs-ops/k8s/render_kubeadm
+++ b/bcs-ops/k8s/render_kubeadm
@@ -148,7 +148,13 @@ EOF
 }
 
 render_kubelet() {
-  true
+  local config_file=$1
+  cat >"${config_file}" <<EOF
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+cgroupDriver: systemd
+EOF
 }
@@ ... @@
-  cat /tmp/cluster-config /tmp/init-join-config /tmp/proxy-config >"${config_file}"
+  render_kubelet /tmp/kubelet-config
+  cat /tmp/cluster-config /tmp/init-join-config /tmp/proxy-config /tmp/kubelet-config >"${config_file}"
   cat "${config_file}"
   utils::log "OK" "${config_file} rendered"
-  rm -f /tmp/cluster-config /tmp/init-join-config /tmp/proxy-config
+  rm -f /tmp/cluster-config /tmp/init-join-config /tmp/proxy-config /tmp/kubelet-config
 }
 
 main