From cf8a477633ed9bcf88c12bbe96ca65f0df36541f Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Thu, 16 Jan 2025 14:53:49 +0200 Subject: [PATCH 01/24] CLOUD-875 [PS] Combine xxx-latest and an xxx-version pipelines to use common Groovy code --- ...perator-eks-latest.yml => job-pso-eks.yml} | 4 +- ...-operator-minikube.yml => job-pso-gke.yml} | 4 +- ...or-gke-latest.yml => job-pso-minikube.yml} | 4 +- ...atest-scheduler.yml => job-weekly-pso.yml} | 5 +- cloud/jenkins/ps-operator-eks-version.yml | 14 - cloud/jenkins/ps-operator-gke-version.yml | 21 - .../ps-operator-latest-scheduler.groovy | 119 ---- cloud/jenkins/ps_operator_eks_version.groovy | 514 ------------------ cloud/jenkins/ps_operator_gke_latest.groovy | 478 ---------------- ...rator_eks_latest.groovy => pso-eks.groovy} | 301 +++++----- ...ator_gke_version.groovy => pso-gke.groovy} | 286 +++++----- ...or_minikube.groovy => pso-minikube.groovy} | 256 +++++---- cloud/jenkins/weekly-pso.groovy | 41 ++ 13 files changed, 426 insertions(+), 1621 deletions(-) rename cloud/jenkins/{ps-operator-eks-latest.yml => job-pso-eks.yml} (77%) rename cloud/jenkins/{ps-operator-minikube.yml => job-pso-gke.yml} (84%) rename cloud/jenkins/{ps-operator-gke-latest.yml => job-pso-minikube.yml} (83%) rename cloud/jenkins/{ps-operator-latest-scheduler.yml => job-weekly-pso.yml} (77%) delete mode 100644 cloud/jenkins/ps-operator-eks-version.yml delete mode 100644 cloud/jenkins/ps-operator-gke-version.yml delete mode 100644 cloud/jenkins/ps-operator-latest-scheduler.groovy delete mode 100644 cloud/jenkins/ps_operator_eks_version.groovy delete mode 100644 cloud/jenkins/ps_operator_gke_latest.groovy rename cloud/jenkins/{ps_operator_eks_latest.groovy => pso-eks.groovy} (66%) rename cloud/jenkins/{ps_operator_gke_version.groovy => pso-gke.groovy} (62%) rename cloud/jenkins/{ps_operator_minikube.groovy => pso-minikube.groovy} (56%) create mode 100644 cloud/jenkins/weekly-pso.groovy diff --git 
a/cloud/jenkins/ps-operator-eks-latest.yml b/cloud/jenkins/job-pso-eks.yml similarity index 77% rename from cloud/jenkins/ps-operator-eks-latest.yml rename to cloud/jenkins/job-pso-eks.yml index 6533d7cafc..39cb839ae7 100644 --- a/cloud/jenkins/ps-operator-eks-latest.yml +++ b/cloud/jenkins/job-pso-eks.yml @@ -1,5 +1,5 @@ - job: - name: ps-operator-eks-latest + name: pso-eks project-type: pipeline description: | Do not edit this job through the web! @@ -11,4 +11,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/ps_operator_eks_latest.groovy + script-path: cloud/jenkins/pso-eks.groovy diff --git a/cloud/jenkins/ps-operator-minikube.yml b/cloud/jenkins/job-pso-gke.yml similarity index 84% rename from cloud/jenkins/ps-operator-minikube.yml rename to cloud/jenkins/job-pso-gke.yml index 78698ce88c..8ebd0d4d82 100644 --- a/cloud/jenkins/ps-operator-minikube.yml +++ b/cloud/jenkins/job-pso-gke.yml @@ -1,5 +1,5 @@ - job: - name: ps-operator-minikube + name: pso-gke project-type: pipeline description: | Do not edit this job through the web! @@ -18,4 +18,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/ps_operator_minikube.groovy + script-path: cloud/jenkins/pso-gke.groovy diff --git a/cloud/jenkins/ps-operator-gke-latest.yml b/cloud/jenkins/job-pso-minikube.yml similarity index 83% rename from cloud/jenkins/ps-operator-gke-latest.yml rename to cloud/jenkins/job-pso-minikube.yml index 702116970c..41ead27a33 100644 --- a/cloud/jenkins/ps-operator-gke-latest.yml +++ b/cloud/jenkins/job-pso-minikube.yml @@ -1,5 +1,5 @@ - job: - name: ps-operator-gke-latest + name: pso-minikube project-type: pipeline description: | Do not edit this job through the web! 
@@ -18,4 +18,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/ps_operator_gke_latest.groovy + script-path: cloud/jenkins/pso-minikube.groovy diff --git a/cloud/jenkins/ps-operator-latest-scheduler.yml b/cloud/jenkins/job-weekly-pso.yml similarity index 77% rename from cloud/jenkins/ps-operator-latest-scheduler.yml rename to cloud/jenkins/job-weekly-pso.yml index e13de9639d..a5e95f9ea6 100644 --- a/cloud/jenkins/ps-operator-latest-scheduler.yml +++ b/cloud/jenkins/job-weekly-pso.yml @@ -1,5 +1,5 @@ - job: - name: ps-operator-latest-scheduler + name: weekly-pso project-type: pipeline description: | Do not edit this job through the web! @@ -13,5 +13,4 @@ - 'master' wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/ps-operator-latest-scheduler.groovy - + script-path: cloud/jenkins/weekly-pso.groovy \ No newline at end of file diff --git a/cloud/jenkins/ps-operator-eks-version.yml b/cloud/jenkins/ps-operator-eks-version.yml deleted file mode 100644 index 85d2f0661d..0000000000 --- a/cloud/jenkins/ps-operator-eks-version.yml +++ /dev/null @@ -1,14 +0,0 @@ -- job: - name: ps-operator-eks-version - project-type: pipeline - description: | - Do not edit this job through the web! - pipeline-scm: - scm: - - git: - url: https://github.com/Percona-Lab/jenkins-pipelines.git - branches: - - master - wipe-workspace: false - lightweight-checkout: true - script-path: cloud/jenkins/ps_operator_eks_version.groovy diff --git a/cloud/jenkins/ps-operator-gke-version.yml b/cloud/jenkins/ps-operator-gke-version.yml deleted file mode 100644 index 65bcc4a4cc..0000000000 --- a/cloud/jenkins/ps-operator-gke-version.yml +++ /dev/null @@ -1,21 +0,0 @@ -- job: - name: ps-operator-gke-version - project-type: pipeline - description: | - Do not edit this job through the web! 
- concurrent: false - properties: - - build-discarder: - days-to-keep: -1 - num-to-keep: 10 - artifact-days-to-keep: -1 - artifact-num-to-keep: 10 - pipeline-scm: - scm: - - git: - url: https://github.com/Percona-Lab/jenkins-pipelines.git - branches: - - master - wipe-workspace: false - lightweight-checkout: true - script-path: cloud/jenkins/ps_operator_gke_version.groovy diff --git a/cloud/jenkins/ps-operator-latest-scheduler.groovy b/cloud/jenkins/ps-operator-latest-scheduler.groovy deleted file mode 100644 index 76521edc2e..0000000000 --- a/cloud/jenkins/ps-operator-latest-scheduler.groovy +++ /dev/null @@ -1,119 +0,0 @@ -library changelog: false, identifier: 'lib@master', retriever: modernSCM([ - $class: 'GitSCMSource', - remote: 'https://github.com/Percona-Lab/jenkins-pipelines.git' -]) _ - - -pipeline { - parameters { - choice( - choices: ['run-release.csv', 'run-distro.csv'], - description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.', - name: 'TEST_SUITE') - text( - defaultValue: '', - description: 'List of tests to run separated by new line', - name: 'TEST_LIST') - choice( - choices: 'NO\nYES', - description: 'Ignore passed tests in previous run (run all)', - name: 'IGNORE_PREVIOUS_RUN' - ) - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 'https://github.com/percona/percona-server-mysql-operator', - description: 'percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: 'latest', - description: 'GKE version', - name: 'PLATFORM_VER') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: 
perconalab/percona-server-mysql-operator:main-orchestrator', - name: 'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 'IMAGE_HAPROXY') - string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') - } - agent { - label 'docker' - } - options { - skipDefaultCheckout() - disableConcurrentBuilds() - buildDiscarder(logRotator(numToKeepStr: '10', artifactNumToKeepStr: '10')) - timestamps () - } - triggers { - cron('0 8 * * 0') - } - stages { - stage("Run parallel") { - parallel{ - - stage('Trigger ps-operator-gke-latest job 3 times') { - steps { - script { - for (int i = 1; i <= 3; i++) { - build job: 'ps-operator-gke-latest', propagate: false, wait: true, parameters: [string(name: 'TEST_SUITE', value: "${TEST_SUITE}"),string(name: 'TEST_LIST', value: "${TEST_LIST}"),string(name: 'IGNORE_PREVIOUS_RUN', value: "${IGNORE_PREVIOUS_RUN}"),string(name: 'GIT_BRANCH', value: "${GIT_BRANCH}"),string(name: 'GIT_REPO', value: "${GIT_REPO}"),string(name: 'PLATFORM_VER', value: "${PLATFORM_VER}"),string(name: 'OPERATOR_IMAGE', value: "${OPERATOR_IMAGE}"),string(name: 'IMAGE_MYSQL', value: "${IMAGE_MYSQL}"),string(name: 'IMAGE_ORCHESTRATOR', value: "${IMAGE_ORCHESTRATOR}"),string(name: 'IMAGE_ROUTER', value: "${IMAGE_ROUTER}"),string(name: 'IMAGE_BACKUP', value: 
"${IMAGE_BACKUP}"),string(name: 'IMAGE_TOOLKIT', value: "${IMAGE_TOOLKIT}"),string(name: 'IMAGE_HAPROXY', value: "${IMAGE_HAPROXY}"),string(name: 'IMAGE_PMM_CLIENT', value: "${IMAGE_PMM_CLIENT}"),string(name: 'IMAGE_PMM_SERVER', value: "${IMAGE_PMM_CLIENT}")] - } - } - } - } - - stage('Trigger ps-operator-eks-latest job 3 times') { - steps { - script { - for (int i = 1; i <= 3; i++) { - build job: 'ps-operator-eks-latest', propagate: false, wait: true, parameters: [string(name: 'TEST_SUITE', value: "${TEST_SUITE}"),string(name: 'TEST_LIST', value: "${TEST_LIST}"),string(name: 'IGNORE_PREVIOUS_RUN', value: "${IGNORE_PREVIOUS_RUN}"),string(name: 'GIT_BRANCH', value: "${GIT_BRANCH}"),string(name: 'GIT_REPO', value: "${GIT_REPO}"),string(name: 'PLATFORM_VER', value: "${PLATFORM_VER}"),string(name: 'OPERATOR_IMAGE', value: "${OPERATOR_IMAGE}"),string(name: 'IMAGE_MYSQL', value: "${IMAGE_MYSQL}"),string(name: 'IMAGE_ORCHESTRATOR', value: "${IMAGE_ORCHESTRATOR}"),string(name: 'IMAGE_ROUTER', value: "${IMAGE_ROUTER}"),string(name: 'IMAGE_BACKUP', value: "${IMAGE_BACKUP}"),string(name: 'IMAGE_TOOLKIT', value: "${IMAGE_TOOLKIT}"),string(name: 'IMAGE_HAPROXY', value: "${IMAGE_HAPROXY}"),string(name: 'IMAGE_PMM_CLIENT', value: "${IMAGE_PMM_CLIENT}"),string(name: 'IMAGE_PMM_SERVER', value: "${IMAGE_PMM_CLIENT}")] - } - } - } - } - } - } - } - post { - always { - - copyArtifacts(projectName: 'ps-operator-gke-latest', selector: lastCompleted(), target: 'ps-operator-gke-latest') - copyArtifacts(projectName: 'ps-operator-eks-latest', selector: lastCompleted(), target: 'ps-operator-eks-latest') - archiveArtifacts '*/*.xml' - step([$class: 'JUnitResultArchiver', testResults: '*/*.xml', healthScaleFactor: 1.0]) - - } - } -} diff --git a/cloud/jenkins/ps_operator_eks_version.groovy b/cloud/jenkins/ps_operator_eks_version.groovy deleted file mode 100644 index 86cd5de840..0000000000 --- a/cloud/jenkins/ps_operator_eks_version.groovy +++ /dev/null @@ -1,514 +0,0 @@ -region='eu-west-2' 
-tests=[] -clusters=[] - -void prepareNode() { - echo "=========================[ Installing tools on the Jenkins executor ]=========================" - sh """ - sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl - kubectl version --client --output=yaml - - curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm - - sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq - sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq - - curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - - ./krew-linux_amd64 install krew - export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" - - kubectl krew install assert - - # v0.17.0 kuttl version - kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml - echo \$(kubectl kuttl --version) is installed - - curl -sL https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_\$(uname -s)_amd64.tar.gz | sudo tar -C /usr/local/bin -xzf - && sudo chmod +x /usr/local/bin/eksctl - """ -} - -void prepareSources() { - if ("$PLATFORM_VER" == "latest") { - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - USED_PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim() - } - } else { - 
USED_PLATFORM_VER="$PLATFORM_VER" - } - echo "USED_PLATFORM_VER=$USED_PLATFORM_VER" - - echo "=========================[ Cloning the sources ]=========================" - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' - sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source - cloud/local/checkout $GIT_REPO $GIT_BRANCH - """ - - script { - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-ver-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim() - } -} - -void dockerBuildPush() { - echo "=========================[ Building and Pushing the operator Docker image ]=========================" - withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { - sh """ - if [[ "$OPERATOR_IMAGE" ]]; then - echo "SKIP: Build is not needed, operator image was set!" - else - cd source - sg docker -c " - docker login -u '$USER' -p '$PASS' - export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH - e2e-tests/build - docker logout - " - sudo rm -rf build - fi - """ - } -} - -void initTests() { - echo "=========================[ Initializing the tests ]=========================" - - echo "Populating tests into the tests array!" 
- def testList = "$TEST_LIST" - def suiteFileName = "source/e2e-tests/$TEST_SUITE" - - if (testList.length() != 0) { - suiteFileName = 'source/e2e-tests/run-custom.csv' - sh """ - echo -e "$testList" > $suiteFileName - echo "Custom test suite contains following tests:" - cat $suiteFileName - """ - } - - def records = readCSV file: suiteFileName - - for (int i=0; i/dev/null 2>&1", returnStatus: true) - - if (retFileExists == 0) { - tests[i]["result"] = "passed" - } - } - } else { - sh """ - aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || : - """ - } - } - - withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { - sh """ - cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml - chmod 600 source/e2e-tests/conf/cloud-secret.yml - """ - } - stash includes: "source/**", name: "sourceFILES" -} - -void clusterRunner(String cluster) { - def clusterCreated=0 - - for (int i=0; i= 1) { - shutdownCluster(cluster) - } -} - -void createCluster(String CLUSTER_SUFFIX) { - clusters.add("$CLUSTER_SUFFIX") - - sh """ - timestamp="\$(date +%s)" -tee cluster-${CLUSTER_SUFFIX}.yaml << EOF -# An example of ClusterConfig showing nodegroups with mixed instances (spot and on demand): ---- -apiVersion: eksctl.io/v1alpha5 -kind: ClusterConfig - -metadata: - name: $CLUSTER_NAME-$CLUSTER_SUFFIX - region: $region - version: "$USED_PLATFORM_VER" - tags: - 'delete-cluster-after-hours': '10' - 'creation-time': '\$timestamp' - 'team': 'cloud' -iam: - withOIDC: true - -addons: -- name: aws-ebs-csi-driver - wellKnownPolicies: - ebsCSIController: true - -nodeGroups: - - name: ng-1 - minSize: 3 - maxSize: 5 - desiredCapacity: 3 - instanceType: "m5.xlarge" - iam: - attachPolicyARNs: - - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy - - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy - - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - - 
arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - arn:aws:iam::aws:policy/AmazonS3FullAccess - tags: - 'iit-billing-tag': 'jenkins-eks' - 'delete-cluster-after-hours': '10' - 'team': 'cloud' - 'product': 'ps-operator' -EOF - """ - - // this is needed for always post action because pipeline runs earch parallel step on another instance - stash includes: "cluster-${CLUSTER_SUFFIX}.yaml", name: "cluster-$CLUSTER_SUFFIX-config" - - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - sh """ - export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - export PATH=/home/ec2-user/.local/bin:\$PATH - eksctl create cluster -f cluster-${CLUSTER_SUFFIX}.yaml - kubectl annotate storageclass gp2 storageclass.kubernetes.io/is-default-class=true - kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user="\$(aws sts get-caller-identity|jq -r '.Arn')" - """ - } -} - -void runTest(Integer TEST_ID) { - def retryCount = 0 - def testName = tests[TEST_ID]["name"] - def clusterSuffix = tests[TEST_ID]["cluster"] - - waitUntil { - def timeStart = new Date().getTime() - try { - echo "The $testName test was started on cluster $CLUSTER_NAME-$clusterSuffix !" 
- tests[TEST_ID]["result"] = "failure" - - timeout(time: 90, unit: 'MINUTES') { - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd'], file(credentialsId: 'eks-conf-file', variable: 'EKS_CONF_FILE')]) { - sh """ - cd source - - [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator - [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH - export IMAGE_MYSQL=$IMAGE_MYSQL - export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR - export IMAGE_ROUTER=$IMAGE_ROUTER - export IMAGE_HAPROXY=$IMAGE_HAPROXY - export IMAGE_BACKUP=$IMAGE_BACKUP - export IMAGE_TOOLKIT=$IMAGE_TOOLKIT - export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT - export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER - export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix - export PATH=\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH - export PATH=/home/ec2-user/.local/bin:\$PATH - - kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$" - """ - } - } - pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") - tests[TEST_ID]["result"] = "passed" - return true - } - catch (exc) { - if (retryCount >= 1) { - currentBuild.result = 'FAILURE' - return true - } - retryCount++ - return false - } - finally { - def timeStop = new Date().getTime() - def durationSec = (timeStop - timeStart) / 1000 - tests[TEST_ID]["time"] = durationSec - echo "The $testName test was finished!" - } - } -} - -void pushArtifactFile(String FILE_NAME) { - echo "Push $FILE_NAME file to S3!" 
- - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - sh """ - touch $FILE_NAME - S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT - aws s3 ls \$S3_PATH/$FILE_NAME || : - aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || : - """ - } -} - -TestsReport = '\n' -void makeReport() { - echo "=========================[ Generating Test Report ]=========================" - for (int i=0; i<'+ testResult +'/>\n' - } - TestsReport = TestsReport + '\n' -} - -void shutdownCluster(String CLUSTER_SUFFIX) { - unstash "cluster-$CLUSTER_SUFFIX-config" - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - sh """ - export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - eksctl delete addon --name aws-ebs-csi-driver --cluster $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region || true - for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do - kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true - kubectl delete sts --all -n \$namespace --force --grace-period=0 || true - kubectl delete replicasets --all -n \$namespace --force --grace-period=0 || true - kubectl delete poddisruptionbudget --all -n \$namespace --force --grace-period=0 || true - kubectl delete services --all -n \$namespace --force --grace-period=0 || true - kubectl delete pods --all -n \$namespace --force --grace-period=0 || true - done - kubectl get svc --all-namespaces || true - - VPC_ID=\$(eksctl get cluster --name $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region -ojson | jq --raw-output '.[0].ResourcesVpcConfig.VpcId' || true) - if [ -n "\$VPC_ID" ]; then - LOADBALS=\$(aws elb describe-load-balancers --region 
$region --output json | jq --raw-output '.LoadBalancerDescriptions[] | select(.VPCId == "'\$VPC_ID'").LoadBalancerName') - for loadbal in \$LOADBALS; do - aws elb delete-load-balancer --load-balancer-name \$loadbal --region $region - done - eksctl delete cluster -f cluster-${CLUSTER_SUFFIX}.yaml --wait --force --disable-nodegroup-eviction || true - - VPC_DESC=\$(aws ec2 describe-vpcs --vpc-id \$VPC_ID --region $region || true) - if [ -n "\$VPC_DESC" ]; then - aws ec2 delete-vpc --vpc-id \$VPC_ID --region $region || true - fi - VPC_DESC=\$(aws ec2 describe-vpcs --vpc-id \$VPC_ID --region $region || true) - if [ -n "\$VPC_DESC" ]; then - for secgroup in \$(aws ec2 describe-security-groups --filters Name=vpc-id,Values=\$VPC_ID --query 'SecurityGroups[*].GroupId' --output text --region $region); do - aws ec2 delete-security-group --group-id \$secgroup --region $region || true - done - - aws ec2 delete-vpc --vpc-id \$VPC_ID --region $region || true - fi - fi - aws cloudformation delete-stack --stack-name eksctl-$CLUSTER_NAME-$CLUSTER_SUFFIX-cluster --region $region || true - aws cloudformation wait stack-delete-complete --stack-name eksctl-$CLUSTER_NAME-$CLUSTER_SUFFIX-cluster --region $region || true - - eksctl get cluster --name $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region || true - aws cloudformation list-stacks --region $region | jq '.StackSummaries[] | select(.StackName | startswith("'eksctl-$CLUSTER_NAME-$CLUSTER_SUFFIX-cluster'"))' || true - """ - } -} - -pipeline { - environment { - CLOUDSDK_CORE_DISABLE_PROMPTS = 1 - PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim() - } - parameters { - choice( - choices: ['run-release.csv', 'run-distro.csv'], - description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.', - name: 'TEST_SUITE') - text( - defaultValue: '', - description: 'List of tests to run separated by new line', - name: 'TEST_LIST') - 
choice( - choices: 'NO\nYES', - description: 'Ignore passed tests in previous run (run all)', - name: 'IGNORE_PREVIOUS_RUN' - ) - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 'https://github.com/percona/percona-server-mysql-operator', - description: 'percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: 'latest', - description: 'EKS kubernetes version', - name: 'PLATFORM_VER') - choice( - choices: 'YES\nNO', - description: 'Run tests in cluster wide mode', - name: 'CLUSTER_WIDE') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator', - name: 'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 'IMAGE_HAPROXY') - string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') - } - agent { - label 'docker' - } - options { - buildDiscarder(logRotator(daysToKeepStr: '-1', 
artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) - skipDefaultCheckout() - disableConcurrentBuilds() - copyArtifactPermission('ps-operator-latest-scheduler'); - } - stages { - stage('Prepare node') { - steps { - prepareNode() - prepareSources() - } - } - stage('Docker Build and Push') { - steps { - dockerBuildPush() - } - } - stage('Init tests') { - steps { - initTests() - } - } - stage('Run Tests') { - options { - timeout(time: 3, unit: 'HOURS') - } - parallel { - stage('cluster1') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster1') - } - } - stage('cluster2') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster2') - } - } - stage('cluster3') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster3') - } - } - stage('cluster4') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster4') - } - } - } - - } - } - post { - always { - echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","") - makeReport() - sh """ - echo "$TestsReport" > TestsReport.xml - """ - step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) - archiveArtifacts '*.xml' - - script { - if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { - slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" - } - - clusters.each { shutdownCluster(it) } - } - - sh """ - sudo docker system prune --volumes -af - sudo rm -rf * - """ - deleteDir() - } - } -} diff --git a/cloud/jenkins/ps_operator_gke_latest.groovy b/cloud/jenkins/ps_operator_gke_latest.groovy deleted file mode 100644 index 7722188f3b..0000000000 --- a/cloud/jenkins/ps_operator_gke_latest.groovy +++ /dev/null @@ -1,478 +0,0 @@ -region='us-central1-a' 
-tests=[] -clusters=[] - -void prepareNode() { - echo "=========================[ Installing tools on the Jenkins executor ]=========================" - sh """ - sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl - kubectl version --client --output=yaml - - curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm - - sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq - sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq - - curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - - ./krew-linux_amd64 install krew - export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" - - kubectl krew install assert - - # v0.17.0 kuttl version - kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml - echo \$(kubectl kuttl --version) is installed - - sudo tee /etc/yum.repos.d/google-cloud-sdk.repo << EOF -[google-cloud-cli] -name=Google Cloud CLI -baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64 -enabled=1 -gpgcheck=1 -repo_gpgcheck=0 -gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg -EOF - sudo yum install -y google-cloud-cli google-cloud-cli-gke-gcloud-auth-plugin - """ - - echo "=========================[ Logging in the Kubernetes provider ]=========================" - withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { - sh """ - gcloud auth activate-service-account --key-file 
$CLIENT_SECRET_FILE - gcloud config set project $GCP_PROJECT - """ - } -} - -void prepareSources() { - if ("$PLATFORM_VER" == "latest") { - USED_PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", , returnStdout: true).trim() - } else { - USED_PLATFORM_VER="$PLATFORM_VER" - } - echo "USED_PLATFORM_VER=$USED_PLATFORM_VER" - - echo "=========================[ Cloning the sources ]=========================" - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' - sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source - cloud/local/checkout $GIT_REPO $GIT_BRANCH - """ - - script { - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-lat-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim() - } -} - -void dockerBuildPush() { - echo "=========================[ Building and Pushing the operator Docker image ]=========================" - withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { - sh """ - if [[ "$OPERATOR_IMAGE" ]]; then - echo "SKIP: Build is not needed, operator image was set!" 
- else - cd source - sg docker -c " - docker login -u '$USER' -p '$PASS' - export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH - e2e-tests/build - docker logout - " - sudo rm -rf build - fi - """ - } -} - -void initTests() { - echo "=========================[ Initializing the tests ]=========================" - - echo "Populating tests into the tests array!" - def testList = "$TEST_LIST" - def suiteFileName = "source/e2e-tests/$TEST_SUITE" - - if (testList.length() != 0) { - suiteFileName = 'source/e2e-tests/run-custom.csv' - sh """ - echo -e "$testList" > $suiteFileName - echo "Custom test suite contains following tests:" - cat $suiteFileName - """ - } - - def records = readCSV file: suiteFileName - - for (int i=0; i/dev/null 2>&1", returnStatus: true) - - if (retFileExists == 0) { - tests[i]["result"] = "passed" - } - } - } else { - sh """ - aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || : - """ - } - } - - withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { - sh """ - cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml - """ - } - stash includes: "source/**", name: "sourceFILES" -} - -void clusterRunner(String cluster) { - def clusterCreated=0 - - for (int i=0; i= 1) { - shutdownCluster(cluster) - } -} - -void createCluster(String CLUSTER_SUFFIX) { - clusters.add("$CLUSTER_SUFFIX") - - withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { - sh """ - export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - - maxRetries=15 - exitCode=1 - while [[ \$exitCode != 0 && \$maxRetries > 0 ]]; do - ret_val=0 - gcloud container clusters create $CLUSTER_NAME-$CLUSTER_SUFFIX \ - --zone $region \ - --cluster-version $USED_PLATFORM_VER \ - --machine-type n1-standard-4 \ - --preemptible \ - --disk-size 30 \ - --num-nodes=3 \ - 
--network=jenkins-ps-vpc \ - --subnetwork=jenkins-ps-$CLUSTER_SUFFIX \ - --no-enable-autoupgrade \ - --cluster-ipv4-cidr=/21 \ - --labels delete-cluster-after-hours=6 &&\ - kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user jenkins@"$GCP_PROJECT".iam.gserviceaccount.com - exitCode=\$? - if [[ \$exitCode == 0 ]]; then break; fi - (( maxRetries -- )) - sleep 1 - done - if [[ \$exitCode != 0 ]]; then exit \$exitCode; fi - """ - } -} - -void runTest(Integer TEST_ID) { - def retryCount = 0 - def testName = tests[TEST_ID]["name"] - def clusterSuffix = tests[TEST_ID]["cluster"] - - waitUntil { - def timeStart = new Date().getTime() - try { - echo "The $testName test was started on cluster $CLUSTER_NAME-$clusterSuffix !" - tests[TEST_ID]["result"] = "failure" - - timeout(time: 90, unit: 'MINUTES') { - sh """ - cd source - - [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator - [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH - export IMAGE_MYSQL=$IMAGE_MYSQL - export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR - export IMAGE_ROUTER=$IMAGE_ROUTER - export IMAGE_HAPROXY=$IMAGE_HAPROXY - export IMAGE_BACKUP=$IMAGE_BACKUP - export IMAGE_TOOLKIT=$IMAGE_TOOLKIT - export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT - export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER - export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix - export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" - - kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$" - """ - } - pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") - tests[TEST_ID]["result"] = "passed" - return true - } - catch (exc) { - if (retryCount >= 1) { - currentBuild.result = 'FAILURE' - return true - } - retryCount++ - return false - } - finally { - def timeStop = new Date().getTime() - def durationSec = (timeStop - timeStart) / 1000 - tests[TEST_ID]["time"] 
= durationSec - echo "The $testName test was finished!" - } - } -} - -void pushArtifactFile(String FILE_NAME) { - echo "Push $FILE_NAME file to S3!" - - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - sh """ - touch $FILE_NAME - S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT - aws s3 ls \$S3_PATH/$FILE_NAME || : - aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || : - """ - } -} - -TestsReport = '\n' -void makeReport() { - echo "=========================[ Generating Test Report ]=========================" - for (int i=0; i<'+ testResult +'/>\n' - } - TestsReport = TestsReport + '\n' -} - -void shutdownCluster(String CLUSTER_SUFFIX) { - withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { - sh """ - export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do - kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true - kubectl delete sts --all -n \$namespace --force --grace-period=0 || true - kubectl delete replicasets --all -n \$namespace --force --grace-period=0 || true - kubectl delete poddisruptionbudget --all -n \$namespace --force --grace-period=0 || true - kubectl delete services --all -n \$namespace --force --grace-period=0 || true - kubectl delete pods --all -n \$namespace --force --grace-period=0 || true - done - kubectl get svc --all-namespaces || true - gcloud container clusters delete --zone $region $CLUSTER_NAME-$CLUSTER_SUFFIX --quiet || true - """ - } -} - -pipeline { - environment { - CLOUDSDK_CORE_DISABLE_PROMPTS = 1 - PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || 
echo main", , returnStdout: true).trim() - } - parameters { - choice( - choices: ['run-release.csv', 'run-distro.csv'], - description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.', - name: 'TEST_SUITE') - text( - defaultValue: '', - description: 'List of tests to run separated by new line', - name: 'TEST_LIST') - choice( - choices: 'NO\nYES', - description: 'Ignore passed tests in previous run (run all)', - name: 'IGNORE_PREVIOUS_RUN' - ) - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 'https://github.com/percona/percona-server-mysql-operator', - description: 'percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: 'latest', - description: 'GKE version', - name: 'PLATFORM_VER') - choice( - choices: 'YES\nNO', - description: 'Run tests in cluster wide mode', - name: 'CLUSTER_WIDE') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator', - name: 'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 'IMAGE_HAPROXY') - 
string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') - } - agent { - label 'docker' - } - options { - buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) - skipDefaultCheckout() - disableConcurrentBuilds() - copyArtifactPermission('ps-operator-latest-scheduler'); - } - stages { - stage('Prepare node') { - steps { - prepareNode() - prepareSources() - } - } - stage('Docker Build and Push') { - steps { - dockerBuildPush() - } - } - stage('Init tests') { - steps { - initTests() - } - } - stage('Run Tests') { - options { - timeout(time: 3, unit: 'HOURS') - } - parallel { - stage('cluster1') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster1') - } - } - stage('cluster2') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster2') - } - } - stage('cluster3') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster3') - } - } - stage('cluster4') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster4') - } - } - stage('cluster5') { - agent { - label 'docker' - } - steps { - prepareNode() - unstash "sourceFILES" - clusterRunner('cluster5') - } - } - } - } - } - post { - always { - echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","") - makeReport() - sh """ - echo "$TestsReport" > TestsReport.xml - """ - step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) - archiveArtifacts '*.xml' - - script { - if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { - slackSend channel: '#cloud-dev-ci', color: 
'#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" - } - - clusters.each { shutdownCluster(it) } - } - - sh """ - sudo docker system prune --volumes -af - sudo rm -rf * - """ - deleteDir() - } - } -} diff --git a/cloud/jenkins/ps_operator_eks_latest.groovy b/cloud/jenkins/pso-eks.groovy similarity index 66% rename from cloud/jenkins/ps_operator_eks_latest.groovy rename to cloud/jenkins/pso-eks.groovy index 617d44a856..e5bb548722 100644 --- a/cloud/jenkins/ps_operator_eks_latest.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -1,8 +1,51 @@ region='eu-west-2' tests=[] clusters=[] +release_versions="source/e2e-tests/release_versions" +
+String getParam(String paramName, String keyName = null) { + keyName = keyName ?: paramName + + param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim() + if ("$param") { + echo "$paramName=$param (from params file)" + } else { + error("$keyName not found in params file $release_versions") + } + return param +} void prepareNode() { + echo "=========================[ Cloning the sources ]=========================" + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + sh """ + # sudo is needed for better node recovery after compilation failure + # if building failed on compilation stage directory will have files owned by docker user + sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source + """ + + if ("$PILLAR_VERSION" != "none") { + echo "=========================[ Getting parameters for release test ]=========================" + IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") + IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}") + IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", 
"IMAGE_BACKUP${PILLAR_VERSION}") + IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}") + IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY") + IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR") + IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") + IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") + IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") + if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") { + PLATFORM_VER = getParam("PLATFORM_VER", "EKS_${PLATFORM_VER}") + } + } else { + echo "=========================[ Not a release run. Using job params only! ]=========================" + } + echo "=========================[ Installing tools on the Jenkins executor ]=========================" sh """ sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl @@ -25,42 +68,29 @@ void prepareNode() { curl -sL https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_\$(uname -s)_amd64.tar.gz | sudo tar -C /usr/local/bin -xzf - && sudo chmod +x /usr/local/bin/eksctl """ -} -void prepareSources() { if ("$PLATFORM_VER" == "latest") { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - USED_PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim() + PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim() } - } else { - 
USED_PLATFORM_VER="$PLATFORM_VER" } - echo "USED_PLATFORM_VER=$USED_PLATFORM_VER" - - echo "=========================[ Cloning the sources ]=========================" - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' - sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source - cloud/local/checkout $GIT_REPO $GIT_BRANCH - """ - script { - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-lat-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim() + if ("$IMAGE_MYSQL") { + cw = ("$CLUSTER_WIDE" == "YES") ? 
"CW" : "NON-CW" + currentBuild.displayName = "#" + currentBuild.number + " $PLATFORM_VER" + currentBuild.description = "$GIT_BRANCH $cw " + "$IMAGE_MYSQL".split(":")[1] } + + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() } void dockerBuildPush() { echo "=========================[ Building and Pushing the operator Docker image ]=========================" withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { sh """ - if [[ "$OPERATOR_IMAGE" ]]; then + if [[ "$IMAGE_OPERATOR" ]]; then echo "SKIP: Build is not needed, operator image was set!" 
else cd source @@ -107,7 +137,7 @@ void initTests() { for (int i=0; i/dev/null 2>&1", returnStatus: true) if (retFileExists == 0) { @@ -121,13 +151,11 @@ void initTests() { } } - withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { + withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE')]) { sh """ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml - chmod 600 source/e2e-tests/conf/cloud-secret.yml """ } - stash includes: "source/**", name: "sourceFILES" } void clusterRunner(String cluster) { @@ -156,55 +184,45 @@ void createCluster(String CLUSTER_SUFFIX) { sh """ timestamp="\$(date +%s)" tee cluster-${CLUSTER_SUFFIX}.yaml << EOF -# An example of ClusterConfig showing nodegroups with mixed instances (spot and on demand): ---- apiVersion: eksctl.io/v1alpha5 kind: ClusterConfig - metadata: - name: $CLUSTER_NAME-$CLUSTER_SUFFIX - region: $region - version: "$USED_PLATFORM_VER" - tags: - 'delete-cluster-after-hours': '10' - 'creation-time': '\$timestamp' - 'team': 'cloud' + name: $CLUSTER_NAME-$CLUSTER_SUFFIX + region: $region + version: "$PLATFORM_VER" + tags: + 'delete-cluster-after-hours': '10' + 'creation-time': '\$timestamp' + 'team': 'cloud' iam: withOIDC: true - addons: - name: aws-ebs-csi-driver wellKnownPolicies: ebsCSIController: true - nodeGroups: - - name: ng-1 - minSize: 3 - maxSize: 5 - desiredCapacity: 3 - instanceType: "m5.xlarge" - iam: - attachPolicyARNs: - - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy - - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy - - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly - - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore - - arn:aws:iam::aws:policy/AmazonS3FullAccess - tags: - 'iit-billing-tag': 'jenkins-eks' - 'delete-cluster-after-hours': '10' - 'team': 'cloud' - 'product': 'ps-operator' +- name: ng-1 + minSize: 3 + maxSize: 5 + instanceType: 'm5.xlarge' + iam: + attachPolicyARNs: + - 
arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly + - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore + - arn:aws:iam::aws:policy/AmazonS3FullAccess + tags: + 'iit-billing-tag': 'jenkins-eks' + 'delete-cluster-after-hours': '10' + 'team': 'cloud' + 'product': 'ps-operator' EOF """ - // this is needed for always post action because pipeline runs earch parallel step on another instance - stash includes: "cluster-${CLUSTER_SUFFIX}.yaml", name: "cluster-$CLUSTER_SUFFIX-config" - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { sh """ export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - export PATH=/home/ec2-user/.local/bin:\$PATH eksctl create cluster -f cluster-${CLUSTER_SUFFIX}.yaml kubectl annotate storageclass gp2 storageclass.kubernetes.io/is-default-class=true kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user="\$(aws sts get-caller-identity|jq -r '.Arn')" @@ -228,25 +246,25 @@ void runTest(Integer TEST_ID) { sh """ cd source + export DEBUG_TESTS=1 [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator - [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH + export IMAGE=$IMAGE_OPERATOR export IMAGE_MYSQL=$IMAGE_MYSQL - export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + export IMAGE_BACKUP=$IMAGE_BACKUP export IMAGE_ROUTER=$IMAGE_ROUTER export IMAGE_HAPROXY=$IMAGE_HAPROXY - export IMAGE_BACKUP=$IMAGE_BACKUP + export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR export IMAGE_TOOLKIT=$IMAGE_TOOLKIT export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix - export PATH=\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH - export 
PATH=/home/ec2-user/.local/bin:\$PATH + export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$" """ } } - pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") + pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$PLATFORM_VER-$DB_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") tests[TEST_ID]["result"] = "passed" return true } @@ -280,25 +298,37 @@ void pushArtifactFile(String FILE_NAME) { } } -TestsReport = '\n' void makeReport() { echo "=========================[ Generating Test Report ]=========================" - for (int i=0; i<'+ testResult +'/>\n' + testsReport = "\n" + for (int i = 0; i < tests.size(); i ++) { + testsReport += '<'+ tests[i]["result"] +'/>\n' } - TestsReport = TestsReport + '\n' + testsReport += '\n' + + echo "=========================[ Generating Parameters Report ]=========================" + pipelineParameters = """ + testsuite name=$JOB_NAME + IMAGE_OPERATOR=$IMAGE_OPERATOR + IMAGE_MYSQL=$IMAGE_MYSQL + IMAGE_BACKUP=$IMAGE_BACKUP + IMAGE_ROUTER=$IMAGE_ROUTER + IMAGE_HAPROXY=$IMAGE_HAPROXY + IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + IMAGE_TOOLKIT=$IMAGE_TOOLKIT + IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT + IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER + PLATFORM_VER=$PLATFORM_VER + """ + + writeFile file: "TestsReport.xml", text: testsReport + writeFile file: 'PipelineParameters.txt', text: pipelineParameters } void shutdownCluster(String CLUSTER_SUFFIX) { - unstash "cluster-$CLUSTER_SUFFIX-config" withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { sh """ export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - eksctl delete addon --name aws-ebs-csi-driver --cluster $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region || true for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE 
"^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true kubectl delete sts --all -n \$namespace --force --grace-period=0 || true @@ -341,75 +371,25 @@ void shutdownCluster(String CLUSTER_SUFFIX) { pipeline { environment { - CLOUDSDK_CORE_DISABLE_PROMPTS = 1 - PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim() + DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { - choice( - choices: ['run-release.csv', 'run-distro.csv'], - description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.', - name: 'TEST_SUITE') - text( - defaultValue: '', - description: 'List of tests to run separated by new line', - name: 'TEST_LIST') - choice( - choices: 'NO\nYES', - description: 'Ignore passed tests in previous run (run all)', - name: 'IGNORE_PREVIOUS_RUN' - ) - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 'https://github.com/percona/percona-server-mysql-operator', - description: 'percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: 'latest', - description: 'EKS kubernetes version', - name: 'PLATFORM_VER') - choice( - choices: 'YES\nNO', - description: 'Run tests in cluster wide mode', - name: 'CLUSTER_WIDE') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator', - name: 
'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 'IMAGE_HAPROXY') - string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') + choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') + text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line') + choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)') + choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.') + string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository') + string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'EKS kubernetes version. 
If set to min or max, value will be automatically taken from release_versions file.') + choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode') + string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main') + string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql') + string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup') + string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router') + string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy') + string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator') + string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit') + string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest') + string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest') } agent { label 'docker' @@ -421,10 +401,9 @@ pipeline { copyArtifactPermission('ps-operator-latest-scheduler'); } stages { - stage('Prepare node') { + stage('Prepare Node') { steps { prepareNode() - prepareSources() } } stage('Docker Build and Push') { @@ -432,69 +411,42 @@ pipeline { dockerBuildPush() } } - stage('Init tests') { + stage('Init Tests') { steps { initTests() } } stage('Run Tests') { - options { - timeout(time: 3, unit: 'HOURS') - } parallel { stage('cluster1') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster1') } } stage('cluster2') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster2') } } stage('cluster3') 
{ - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster3') } } stage('cluster4') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster4') } } } - } } post { always { echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","") makeReport() - sh """ - echo "$TestsReport" > TestsReport.xml - """ step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) - archiveArtifacts '*.xml' + archiveArtifacts '*.xml,*.txt' script { if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { @@ -506,7 +458,6 @@ pipeline { sh """ sudo docker system prune --volumes -af - sudo rm -rf * """ deleteDir() } diff --git a/cloud/jenkins/ps_operator_gke_version.groovy b/cloud/jenkins/pso-gke.groovy similarity index 62% rename from cloud/jenkins/ps_operator_gke_version.groovy rename to cloud/jenkins/pso-gke.groovy index 2ac3eeacde..41518e16fc 100644 --- a/cloud/jenkins/ps_operator_gke_version.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -1,8 +1,54 @@ region='us-central1-a' tests=[] clusters=[] +release_versions="source/e2e-tests/release_versions" + +String getParam(String paramName, String keyName = null) { + keyName = keyName ?: paramName + + param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim() + if ("$param") { + echo "$paramName=$param (from params file)" + } else { + error("$keyName not found in params file $release_versions") + } + return param +} void prepareNode() { + echo "=========================[ Cloning the sources ]=========================" + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + sh """ + # sudo is needed for better node recovery after compilation failure + # if building failed on compilation stage directory will have files owned by docker user + sudo git config --global --add 
safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source + """ + + if ("$PILLAR_VERSION" != "none") { + echo "=========================[ Getting parameters for release test ]=========================" + GKE_RELEASE_CHANNEL = "stable" + echo "Forcing GKE_RELEASE_CHANNEL=stable, because it's a release run!" + + IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") + IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}") + IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}") + IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}") + IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY") + IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR") + IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") + IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") + IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") + if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") { + PLATFORM_VER = getParam("PLATFORM_VER", "GKE_${PLATFORM_VER}") + } + } else { + echo "=========================[ Not a release run. Using job params only! 
]=========================" + } + echo "=========================[ Installing tools on the Jenkins executor ]=========================" sh """ sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl @@ -42,40 +88,27 @@ EOF gcloud config set project $GCP_PROJECT """ } -} -void prepareSources() { if ("$PLATFORM_VER" == "latest") { - USED_PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", , returnStdout: true).trim() - } else { - USED_PLATFORM_VER="$PLATFORM_VER" + PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", returnStdout: true).trim() } - echo "USED_PLATFORM_VER=$USED_PLATFORM_VER" - - echo "=========================[ Cloning the sources ]=========================" - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' - sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source - cloud/local/checkout $GIT_REPO $GIT_BRANCH - """ - script { - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-ver-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | 
cut -d' ' -f1", , returnStdout: true).trim() + if ("$IMAGE_MYSQL") { + cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW" + currentBuild.displayName = "#" + currentBuild.number + " $PLATFORM_VER-$GKE_RELEASE_CHANNEL" + currentBuild.description = "$GIT_BRANCH $cw " + "$IMAGE_MYSQL".split(":")[1] } + + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() } void dockerBuildPush() { echo "=========================[ Building and Pushing the operator Docker image ]=========================" withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { sh """ - if [[ "$OPERATOR_IMAGE" ]]; then + if [[ "$IMAGE_OPERATOR" ]]; then echo "SKIP: Build is not needed, operator image was set!" 
else cd source @@ -122,7 +155,7 @@ void initTests() { for (int i=0; i/dev/null 2>&1", returnStatus: true) if (retFileExists == 0) { @@ -136,12 +169,11 @@ void initTests() { } } - withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { + withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE')]) { sh """ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ } - stash includes: "source/**", name: "sourceFILES" } void clusterRunner(String cluster) { @@ -170,33 +202,46 @@ void createCluster(String CLUSTER_SUFFIX) { withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { sh """ export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX - maxRetries=15 exitCode=1 + while [[ \$exitCode != 0 && \$maxRetries > 0 ]]; do - ret_val=0 gcloud container clusters create $CLUSTER_NAME-$CLUSTER_SUFFIX \ --release-channel $GKE_RELEASE_CHANNEL \ --zone $region \ - --cluster-version $USED_PLATFORM_VER \ - --machine-type n1-standard-4 \ + --cluster-version $PLATFORM_VER \ --preemptible \ --disk-size 30 \ - --num-nodes=3 \ - --network=jenkins-ps-vpc \ - --subnetwork=jenkins-ps-$CLUSTER_SUFFIX \ - --no-enable-autoupgrade \ + --machine-type n1-standard-4 \ + --num-nodes=4 \ + --min-nodes=4 \ + --max-nodes=6 \ + --network=jenkins-vpc \ + --subnetwork=jenkins-$CLUSTER_SUFFIX \ --cluster-ipv4-cidr=/21 \ - --labels delete-cluster-after-hours=6 &&\ - kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user jenkins@"$GCP_PROJECT".iam.gserviceaccount.com + --labels delete-cluster-after-hours=6 \ + --enable-ip-alias &&\ + kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user=\$(gcloud config get-value core/account) exitCode=\$? 
if [[ \$exitCode == 0 ]]; then break; fi (( maxRetries -- )) sleep 1 done if [[ \$exitCode != 0 ]]; then exit \$exitCode; fi + + CURRENT_TIME=\$(date --rfc-3339=seconds) + FUTURE_TIME=\$(date -d '6 hours' --rfc-3339=seconds) + + # When using the STABLE release channel, auto-upgrade must be enabled for node pools, which means you cannot manually disable it, + # so we can't just use --no-enable-autoupgrade in the command above, so we need the following workaround. + gcloud container clusters update $CLUSTER_NAME-$CLUSTER_SUFFIX \ + --zone $region \ + --add-maintenance-exclusion-start "\$CURRENT_TIME" \ + --add-maintenance-exclusion-end "\$FUTURE_TIME" + + kubectl get nodes -o custom-columns="NAME:.metadata.name,TAINTS:.spec.taints,AGE:.metadata.creationTimestamp" """ - } + } } void runTest(Integer TEST_ID) { @@ -214,13 +259,14 @@ void runTest(Integer TEST_ID) { sh """ cd source + export DEBUG_TESTS=1 [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator - [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH + export IMAGE=$IMAGE_OPERATOR export IMAGE_MYSQL=$IMAGE_MYSQL - export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + export IMAGE_BACKUP=$IMAGE_BACKUP export IMAGE_ROUTER=$IMAGE_ROUTER export IMAGE_HAPROXY=$IMAGE_HAPROXY - export IMAGE_BACKUP=$IMAGE_BACKUP + export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR export IMAGE_TOOLKIT=$IMAGE_TOOLKIT export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER @@ -230,7 +276,7 @@ void runTest(Integer TEST_ID) { kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$" """ } - pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") + pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$PLATFORM_VER-$DB_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") tests[TEST_ID]["result"] = "passed" return true } @@ -264,17 +310,31 @@ void pushArtifactFile(String FILE_NAME) 
{ } } -TestsReport = '\n' void makeReport() { echo "=========================[ Generating Test Report ]=========================" - for (int i=0; i<'+ testResult +'/>\n' + testsReport = "\n" + for (int i = 0; i < tests.size(); i ++) { + testsReport += '<'+ tests[i]["result"] +'/>\n' } - TestsReport = TestsReport + '\n' + testsReport += '\n' + + echo "=========================[ Generating Parameters Report ]=========================" + pipelineParameters = """ + testsuite name=$JOB_NAME + IMAGE_OPERATOR=$IMAGE_OPERATOR + IMAGE_MYSQL=$IMAGE_MYSQL + IMAGE_BACKUP=$IMAGE_BACKUP + IMAGE_ROUTER=$IMAGE_ROUTER + IMAGE_HAPROXY=$IMAGE_HAPROXY + IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + IMAGE_TOOLKIT=$IMAGE_TOOLKIT + IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT + IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER + PLATFORM_VER=$PLATFORM_VER + """ + + writeFile file: "TestsReport.xml", text: testsReport + writeFile file: 'PipelineParameters.txt', text: pipelineParameters } void shutdownCluster(String CLUSTER_SUFFIX) { @@ -297,79 +357,26 @@ void shutdownCluster(String CLUSTER_SUFFIX) { pipeline { environment { - CLOUDSDK_CORE_DISABLE_PROMPTS = 1 - PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim() + DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { - choice( - choices: ['run-release.csv', 'run-distro.csv'], - description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.', - name: 'TEST_SUITE') - text( - defaultValue: '', - description: 'List of tests to run separated by new line', - name: 'TEST_LIST') - choice( - choices: 'NO\nYES', - description: 'Ignore passed tests in previous run (run all)', - name: 'IGNORE_PREVIOUS_RUN' - ) - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 
'https://github.com/percona/percona-server-mysql-operator', - description: 'percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: 'latest', - description: 'GKE version', - name: 'PLATFORM_VER') - choice( - choices: 'None\nstable\nregular\nrapid', - description: 'GKE release channel', - name: 'GKE_RELEASE_CHANNEL') - choice( - choices: 'YES\nNO', - description: 'Run tests in cluster wide mode', - name: 'CLUSTER_WIDE') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator', - name: 'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 'IMAGE_HAPROXY') - string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') + choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') + text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line') 
+ choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)') + choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.') + string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository') + string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'GKE kubernetes version. If set to min or max, value will be automatically taken from release_versions file.') + choice(name: 'GKE_RELEASE_CHANNEL', choices: 'rapid\nstable\nregular\nNone', description: 'GKE release channel. Will be forced to stable for release run.') + choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode') + string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main') + string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql') + string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup') + string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router') + string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy') + string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator') + string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit') + string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest') + string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest') } agent { label 'docker' @@ -381,10 +388,9 @@ pipeline { copyArtifactPermission('ps-operator-latest-scheduler'); } stages { - stage('Prepare 
node') { + stage('Prepare Node') { steps { prepareNode() - prepareSources() } } stage('Docker Build and Push') { @@ -392,63 +398,35 @@ pipeline { dockerBuildPush() } } - stage('Init tests') { + stage('Init Tests') { steps { initTests() } } stage('Run Tests') { - options { - timeout(time: 3, unit: 'HOURS') - } parallel { stage('cluster1') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster1') } } stage('cluster2') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster2') } } stage('cluster3') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster3') } } stage('cluster4') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster4') } } stage('cluster5') { - agent { - label 'docker' - } steps { - prepareNode() - unstash "sourceFILES" clusterRunner('cluster5') } } @@ -459,11 +437,8 @@ pipeline { always { echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","") makeReport() - sh """ - echo "$TestsReport" > TestsReport.xml - """ step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) - archiveArtifacts '*.xml' + archiveArtifacts '*.xml,*.txt' script { if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { @@ -475,7 +450,6 @@ pipeline { sh """ sudo docker system prune --volumes -af - sudo rm -rf * """ deleteDir() } diff --git a/cloud/jenkins/ps_operator_minikube.groovy b/cloud/jenkins/pso-minikube.groovy similarity index 56% rename from cloud/jenkins/ps_operator_minikube.groovy rename to cloud/jenkins/pso-minikube.groovy index ad4aa2d704..36aa6841d3 100644 --- a/cloud/jenkins/ps_operator_minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -1,12 +1,19 @@ tests=[] +release_versions="source/e2e-tests/release_versions" -void checkoutSources() { - if ("$IMAGE_MYSQL") { - 
currentBuild.description = "$GIT_BRANCH-$PLATFORM_VER-CW_$CLUSTER_WIDE-" + "$IMAGE_MYSQL".split(":")[1] - } +String getParam(String paramName, String keyName = null) { + keyName = keyName ?: paramName - echo "USED_PLATFORM_VER=$PLATFORM_VER" + param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim() + if ("$param") { + echo "$paramName=$param (from params file)" + } else { + error("$keyName not found in params file $release_versions") + } + return param +} +void prepareNode() { echo "=========================[ Cloning the sources ]=========================" git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ @@ -16,23 +23,69 @@ void checkoutSources() { sudo git reset --hard sudo git clean -xdf sudo rm -rf source - cloud/local/checkout $GIT_REPO $GIT_BRANCH + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mongodb-operator source + """ + + if ("$PILLAR_VERSION" != "none") { + echo "=========================[ Getting parameters for release test ]=========================" + IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") + IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}") + IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}") + IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}") + IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY") + IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR") + IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") + IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") + IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") + if ("$PLATFORM_VER".toLowerCase() == "rel") { + PLATFORM_VER = getParam("PLATFORM_VER", "MINIKUBE_${PLATFORM_VER}") + } + } else { + echo "=========================[ Not a release run. Using job params only! 
]=========================" + } + + echo "=========================[ Installing tools on the Jenkins executor ]=========================" + sh """ + sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl + kubectl version --client --output=yaml + + curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm + + sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq + sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq + + sudo curl -sLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x /usr/local/bin/minikube + + curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - + ./krew-linux_amd64 install krew + export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" + + kubectl krew install assert + + # v0.17.0 kuttl version + kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml + echo \$(kubectl kuttl --version) is installed """ - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim() + if ("$IMAGE_MYSQL") { + cw = ("$CLUSTER_WIDE" == "YES") ? 
"CW" : "NON-CW" + currentBuild.displayName = "#" + currentBuild.number + " $PLATFORM_VER" + currentBuild.description = "$GIT_BRANCH $cw " + "$IMAGE_MYSQL".split(":")[1] + } + + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() } void dockerBuildPush() { echo "=========================[ Building and Pushing the operator Docker image ]=========================" withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { sh """ - if [[ "$OPERATOR_IMAGE" ]]; then + if [[ "$IMAGE_OPERATOR" ]]; then echo "SKIP: Build is not needed, operator image was set!" else cd source sg docker -c " - docker buildx create --use docker login -u '$USER' -p '$PASS' export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH e2e-tests/build @@ -96,38 +149,10 @@ void initTests() { } } -void installToolsOnNode() { - echo "=========================[ Installing tools on the Jenkins executor ]=========================" - sh """ - sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl - kubectl version --client --output=yaml - - curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm - - sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/v4.35.1/yq_linux_amd64 > /usr/local/bin/yq" - sudo chmod +x /usr/local/bin/yq - - sudo sh -c "curl -s -L https://github.com/jqlang/jq/releases/download/jq-1.6/jq-linux64 > /usr/local/bin/jq" - sudo chmod +x /usr/local/bin/jq - - sudo 
curl -sLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x /usr/local/bin/minikube - - curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - - ./krew-linux_amd64 install krew - export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" - - kubectl krew install assert - - # v0.17.0 kuttl version - kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml - echo \$(kubectl kuttl --version) is installed - """ -} - void clusterRunner(String cluster) { sh """ export CHANGE_MINIKUBE_NONE_USER=true - /usr/local/bin/minikube start --kubernetes-version $PLATFORM_VER --cpus=6 --memory=28G + minikube start --kubernetes-version $PLATFORM_VER --cpus=6 --memory=28G """ for (int i=0; i<'+ testResult +'/>\n' + testsReport = "\n" + for (int i = 0; i < tests.size(); i ++) { + testsReport += '<'+ tests[i]["result"] +'/>\n' } - TestsReport = TestsReport + '\n' -} + testsReport += '\n' + + echo "=========================[ Generating Parameters Report ]=========================" + pipelineParameters = """ + testsuite name=$JOB_NAME + IMAGE_OPERATOR=$IMAGE_OPERATOR + IMAGE_MYSQL=$IMAGE_MYSQL + IMAGE_BACKUP=$IMAGE_BACKUP + IMAGE_ROUTER=$IMAGE_ROUTER + IMAGE_HAPROXY=$IMAGE_HAPROXY + IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + IMAGE_TOOLKIT=$IMAGE_TOOLKIT + IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT + IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER + PLATFORM_VER=$PLATFORM_VER + """ + writeFile file: "TestsReport.xml", text: testsReport + writeFile file: 'PipelineParameters.txt', text: pipelineParameters +} pipeline { environment { CLEAN_NAMESPACE = 1 - DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim() + DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", 
returnStdout: true).trim() } - parameters { - choice( - choices: ['run-minikube.csv', 'run-distro.csv'], - description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.', - name: 'TEST_SUITE') - text( - defaultValue: '', - description: 'List of tests to run separated by new line', - name: 'TEST_LIST') - choice( - choices: 'NO\nYES', - description: 'Ignore passed tests in previous run (run all)', - name: 'IGNORE_PREVIOUS_RUN' - ) - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 'https://github.com/percona/percona-server-mysql-operator', - description: 'percona/percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: 'latest', - description: 'Minikube Kubernetes Version', - name: 'PLATFORM_VER', - trim: true) - choice( - choices: 'YES\nNO', - description: 'Run tests in cluster wide mode', - name: 'CLUSTER_WIDE') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator', - name: 'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 
'IMAGE_HAPROXY') - string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') + choice(name: 'TEST_SUITE', choices: ['run-minikube.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') + text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line') + choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)') + choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.') + string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository') + string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'Minikube kubernetes version. If set to rel, value will be automatically taken from release_versions file.') + choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode') + string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main') + string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql') + string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup') + string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router') + string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy') + string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator') + string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: 
perconalab/percona-server-mysql-operator:main-toolkit') + string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest') + string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest') } - agent { label 'docker-32gb' } - options { buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) skipDefaultCheckout() } - stages { - stage('Checkout sources') { + stage('Prepare Node') { steps { - checkoutSources() + prepareNode() } } stage('Docker Build and Push') { @@ -311,7 +295,7 @@ pipeline { dockerBuildPush() } } - stage('Init tests') { + stage('Init Tests') { steps { initTests() } @@ -321,25 +305,27 @@ pipeline { timeout(time: 3, unit: 'HOURS') } steps { - installToolsOnNode() clusterRunner('cluster1') } } } - post { always { echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","") makeReport() - sh """ - echo "$TestsReport" > TestsReport.xml - """ step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) - archiveArtifacts '*.xml' + archiveArtifacts '*.xml,*.txt' + + script { + if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { + slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" + } + } + sh """ - /usr/local/bin/minikube delete || true + minikube delete || true """ deleteDir() } } -} \ No newline at end of file +} diff --git a/cloud/jenkins/weekly-pso.groovy b/cloud/jenkins/weekly-pso.groovy new file mode 100644 index 0000000000..0852ea0a03 --- /dev/null +++ b/cloud/jenkins/weekly-pso.groovy @@ -0,0 +1,41 @@ +void triggerJobMultiple(String jobName) { + for (int i = 1; i <= 3; i++) { + build job: "$jobName", propagate: false, wait: true + } +} + +pipeline { + agent any + options { + skipDefaultCheckout() + disableConcurrentBuilds() + 
buildDiscarder(logRotator(numToKeepStr: '10', artifactNumToKeepStr: '10')) + } + triggers { + cron('0 8 * * 0') + } + stages { + stage("Run parallel") { + parallel { + stage('Trigger pso-gke job 3 times') { + steps { + triggerJobMultiple("pso-gke") + } + } + stage('Trigger pso-eks job 3 times') { + steps { + triggerJobMultiple("pso-eks") + } + } + } + } + } + post { + always { + copyArtifacts(projectName: 'pso-gke', selector: lastCompleted(), target: 'pso-gke') + copyArtifacts(projectName: 'pso-eks', selector: lastCompleted(), target: 'pso-eks') + archiveArtifacts '*/*.xml' + step([$class: 'JUnitResultArchiver', testResults: '*/*.xml', healthScaleFactor: 1.0]) + } + } +} From dadd621b4deaa261fb449051f8d1d02ea25e77a6 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 28 Jan 2025 17:39:38 +0200 Subject: [PATCH 02/24] Update credentials ID for cloud secret file in PSO GKE Jenkins script --- cloud/jenkins/pso-gke.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso-gke.groovy index 41518e16fc..6302825f9d 100644 --- a/cloud/jenkins/pso-gke.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -169,7 +169,7 @@ void initTests() { } } - withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE')]) { + withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { sh """ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ From f62b238d61951024b70c9bca107542bb39ec4a0b Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:18:24 +0200 Subject: [PATCH 03/24] Update credentials ID for cloud secret file in PSO EKS and Minikube Jenkins scripts --- cloud/jenkins/pso-eks.groovy | 2 +- cloud/jenkins/pso-minikube.groovy | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy
index e5bb548722..cef2b93b55 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -151,7 +151,7 @@ void initTests() { } } - withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE')]) { + withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { sh """ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso-minikube.groovy index 36aa6841d3..78954822bf 100644 --- a/cloud/jenkins/pso-minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -142,7 +142,7 @@ void initTests() { } } - withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE')]) { + withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { sh """ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ From ff01cfd5f0a0b25a07ff42d3ad934e5a40acecd2 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Thu, 30 Jan 2025 14:27:13 +0200 Subject: [PATCH 04/24] Update git clone URL in PSO EKS Jenkins script to point to MySQL operator --- cloud/jenkins/pso-eks.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy index cef2b93b55..38063626e0 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -25,7 +25,7 @@ void prepareNode() { sudo git reset --hard sudo git clean -xdf sudo rm -rf source - git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mongodb-operator source + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ if ("$PILLAR_VERSION" != "none") { From 645453a825d9cb41085203260e30533c3de2490d Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Thu, 30 Jan 2025 14:32:36 +0200 Subject: [PATCH 05/24] Update git clone URL in PSO 
Minikube Jenkins script to point to MySQL operator --- cloud/jenkins/pso-minikube.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso-minikube.groovy index 78954822bf..605cf5c133 100644 --- a/cloud/jenkins/pso-minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -23,7 +23,7 @@ void prepareNode() { sudo git reset --hard sudo git clean -xdf sudo rm -rf source - git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mongodb-operator source + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ if ("$PILLAR_VERSION" != "none") { From 0ea48bd9d7e5175c2f22efac0df33b7b8c1f4072 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Thu, 30 Jan 2025 15:34:26 +0200 Subject: [PATCH 06/24] Add error logging for test execution in PSO Minikube Jenkins script --- cloud/jenkins/pso-minikube.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso-minikube.groovy index 605cf5c133..49dbd1c847 100644 --- a/cloud/jenkins/pso-minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -188,7 +188,6 @@ void runTest(Integer TEST_ID) { export IMAGE_TOOLKIT=$IMAGE_TOOLKIT export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER - export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$" @@ -198,6 +197,7 @@ void runTest(Integer TEST_ID) { return true } catch (exc) { + echo "Error occurred while running test $testName: ${exc}" if (retryCount >= 1) { currentBuild.result = 'FAILURE' return true From 6d4c19ae54e0965c2cad41d1c6f893ef2047bebb Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Fri, 31 Jan 2025 19:11:05 +0200 Subject: [PATCH 07/24] Update Jenkins build display 
name and description format for MySQL images --- cloud/jenkins/pso-eks.groovy | 4 ++-- cloud/jenkins/pso-gke.groovy | 4 ++-- cloud/jenkins/pso-minikube.groovy | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy index 38063626e0..259a1a11be 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -77,8 +77,8 @@ void prepareNode() { if ("$IMAGE_MYSQL") { cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW" - currentBuild.displayName = "#" + currentBuild.number + " $PLATFORM_VER" - currentBuild.description = "$GIT_BRANCH $cw " + "$IMAGE_MYSQL".split(":")[1] + currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH" + currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw" } GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso-gke.groovy index 6302825f9d..fc780815a4 100644 --- a/cloud/jenkins/pso-gke.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -95,8 +95,8 @@ EOF if ("$IMAGE_MYSQL") { cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW" - currentBuild.displayName = "#" + currentBuild.number + " $PLATFORM_VER-$GKE_RELEASE_CHANNEL" - currentBuild.description = "$GIT_BRANCH $cw " + "$IMAGE_MYSQL".split(":")[1] + currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH" + urrentBuild.description = "$PLATFORM_VER-$GKE_RELEASE_CHANNEL $ARCH " + "$IMAGE_MYSQL".split(":")[1] + " $cw" } GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso-minikube.groovy index 49dbd1c847..e27614856c 100644 --- a/cloud/jenkins/pso-minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -69,8 +69,8 @@ void prepareNode() { if ("$IMAGE_MYSQL") { cw = ("$CLUSTER_WIDE" == "YES") ? 
"CW" : "NON-CW" - currentBuild.displayName = "#" + currentBuild.number + " $PLATFORM_VER" - currentBuild.description = "$GIT_BRANCH $cw " + "$IMAGE_MYSQL".split(":")[1] + currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH" + currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw" } GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() From 6222c0935961917437e586c6aacd63affaaf26bd Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Sat, 1 Feb 2025 22:35:47 +0200 Subject: [PATCH 08/24] Refactor Jenkins pipeline: split prepareNode into prepareAgent, prepareSources, and initParams functions --- cloud/jenkins/pso-gke.groovy | 117 +++++++++++++++++++++++------------ 1 file changed, 76 insertions(+), 41 deletions(-) diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso-gke.groovy index fc780815a4..30a830ae04 100644 --- a/cloud/jenkins/pso-gke.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -15,40 +15,7 @@ String getParam(String paramName, String keyName = null) { return param } -void prepareNode() { - echo "=========================[ Cloning the sources ]=========================" - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' - sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source - git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source - """ - - if ("$PILLAR_VERSION" != "none") { - echo "=========================[ Getting parameters for release test ]=========================" - GKE_RELEASE_CHANNEL = "stable" - echo "Forcing GKE_RELEASE_CHANNEL=stable, because it's a release run!" 
- - IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") - IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}") - IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}") - IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}") - IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY") - IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR") - IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") - IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") - IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") - if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") { - PLATFORM_VER = getParam("PLATFORM_VER", "GKE_${PLATFORM_VER}") - } - } else { - echo "=========================[ Not a release run. Using job params only! ]=========================" - } - +void prepareAgent() { echo "=========================[ Installing tools on the Jenkins executor ]=========================" sh """ sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl @@ -82,12 +49,53 @@ EOF """ echo "=========================[ Logging in the Kubernetes provider ]=========================" - withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { sh """ gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE gcloud config set project $GCP_PROJECT """ } +} + +void prepareSources() { + echo "=========================[ Cloning the sources ]=========================" + git branch: 'master', url: 
'https://github.com/Percona-Lab/jenkins-pipelines' + sh """ + # sudo is needed for better node recovery after compilation failure + # if building failed on compilation stage directory will have files owned by docker user + sudo git config --global --add safe.directory '*' + sudo git reset --hard + sudo git clean -xdf + sudo rm -rf source + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source + """ + + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() +} + +void initParams() { + if ("$PILLAR_VERSION" != "none") { + echo "=========================[ Getting parameters for release test ]=========================" + GKE_RELEASE_CHANNEL = "stable" + echo "Forcing GKE_RELEASE_CHANNEL=stable, because it's a release run!" 
+ + IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") + IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}") + IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}") + IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}") + IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY") + IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR") + IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") + IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") + IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") + if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") { + PLATFORM_VER = getParam("PLATFORM_VER", "GKE_${PLATFORM_VER}") + } + } else { + echo "=========================[ Not a release run. Using job params only! ]=========================" + } if ("$PLATFORM_VER" == "latest") { PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", returnStdout: true).trim() @@ -96,12 +104,8 @@ EOF if ("$IMAGE_MYSQL") { cw = ("$CLUSTER_WIDE" == "YES") ? 
"CW" : "NON-CW" currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH" - urrentBuild.description = "$PLATFORM_VER-$GKE_RELEASE_CHANNEL $ARCH " + "$IMAGE_MYSQL".split(":")[1] + " $cw" + currentBuild.description = "$PLATFORM_VER-$GKE_RELEASE_CHANNEL " + "$IMAGE_MYSQL".split(":")[1] + " $cw" } - - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() } void dockerBuildPush() { @@ -174,6 +178,7 @@ void initTests() { cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ } + stash includes: "source/**", name: "sourceFILES" } void clusterRunner(String cluster) { @@ -390,7 +395,9 @@ pipeline { stages { stage('Prepare Node') { steps { - prepareNode() + prepareAgent() + prepareSources() + initParams() } } stage('Docker Build and Push') { @@ -404,29 +411,57 @@ pipeline { } } stage('Run Tests') { + options { + timeout(time: 3, unit: 'HOURS') + } parallel { stage('cluster1') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster1') } } stage('cluster2') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster2') } } stage('cluster3') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster3') } } stage('cluster4') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster4') } } stage('cluster5') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" 
clusterRunner('cluster5') } } From 88eb4e785de2feb2a80a83652dbead34094d6584 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Sat, 1 Feb 2025 22:57:24 +0200 Subject: [PATCH 09/24] Refactor Jenkins pipeline: rename prepareNode to prepareAgent, add prepareSources and initParams functions --- cloud/jenkins/pso-eks.groovy | 88 ++++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 29 deletions(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy index 259a1a11be..370fd75875 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -15,7 +15,32 @@ String getParam(String paramName, String keyName = null) { return param } -void prepareNode() { +void prepareAgent() { + echo "=========================[ Installing tools on the Jenkins executor ]=========================" + sh """ + sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl + kubectl version --client --output=yaml + + curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm + + sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq + sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq + + curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - + ./krew-linux_amd64 install krew + export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" + + kubectl krew install assert + + # v0.17.0 kuttl version + kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml + echo \$(kubectl kuttl 
--version) is installed + + curl -sL https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_\$(uname -s)_amd64.tar.gz | sudo tar -C /usr/local/bin -xzf - && sudo chmod +x /usr/local/bin/eksctl + """ +} + +void prepareSources() { echo "=========================[ Cloning the sources ]=========================" git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ @@ -28,6 +53,12 @@ void prepareNode() { git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() +} + +void initParams() { if ("$PILLAR_VERSION" != "none") { echo "=========================[ Getting parameters for release test ]=========================" IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") @@ -46,29 +77,6 @@ void prepareNode() { echo "=========================[ Not a release run. Using job params only! 
]=========================" } - echo "=========================[ Installing tools on the Jenkins executor ]=========================" - sh """ - sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl - kubectl version --client --output=yaml - - curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm - - sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq - sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq - - curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - - ./krew-linux_amd64 install krew - export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" - - kubectl krew install assert - - # v0.17.0 kuttl version - kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml - echo \$(kubectl kuttl --version) is installed - - curl -sL https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_\$(uname -s)_amd64.tar.gz | sudo tar -C /usr/local/bin -xzf - && sudo chmod +x /usr/local/bin/eksctl - """ - if ("$PLATFORM_VER" == "latest") { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim() @@ -80,10 +88,6 @@ void prepareNode() { currentBuild.displayName = "#" + currentBuild.number + " 
$GIT_BRANCH" currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw" } - - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() } void dockerBuildPush() { @@ -156,6 +160,7 @@ void initTests() { cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ } + stash includes: "source/**", name: "sourceFILES" } void clusterRunner(String cluster) { @@ -403,7 +408,9 @@ pipeline { stages { stage('Prepare Node') { steps { - prepareNode() + prepareAgent() + prepareSources() + initParams() } } stage('Docker Build and Push') { @@ -417,24 +424,47 @@ pipeline { } } stage('Run Tests') { + options { + timeout(time: 3, unit: 'HOURS') + } parallel { stage('cluster1') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster1') } } stage('cluster2') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster2') } } stage('cluster3') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster3') } } stage('cluster4') { + agent { + label 'docker' + } steps { + prepareAgent() + unstash "sourceFILES" clusterRunner('cluster4') } } From 5cea412c1c42545740a83407f6f0d5812494b8ba Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 3 Feb 2025 22:27:09 +0200 Subject: [PATCH 10/24] also adding shutdownCluster(cluster) inside a post { always {} } block within each parallel stage to have post steps for each step --- 
cloud/jenkins/pso-eks.groovy | 32 +++++++++++--------------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy index 370fd75875..f17b7425a6 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -164,23 +164,19 @@ void initTests() { } void clusterRunner(String cluster) { - def clusterCreated=0 + def clusterCreated = false for (int i=0; i= 1) { - shutdownCluster(cluster) - } } void createCluster(String CLUSTER_SUFFIX) { @@ -429,44 +425,40 @@ pipeline { } parallel { stage('cluster1') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster1') } + post { always { script { shutdownCluster('cluster1') } } } } stage('cluster2') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster2') } + post { always { script { shutdownCluster('cluster2') } } } } stage('cluster3') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster3') } + post { always { script { shutdownCluster('cluster3') } } } } stage('cluster4') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster4') } + post { always { script { shutdownCluster('cluster4') } } } } } } @@ -482,8 +474,6 @@ pipeline { if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" } - - clusters.each { shutdownCluster(it) } } sh """ From 6316e288683a71ac6baf450a8d6c31a5c38f399a Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 4 Feb 2025 11:38:36 +0200 Subject: [PATCH 11/24] Remove unused clusters list from createCluster function in Jenkins pipeline --- 
cloud/jenkins/pso-eks.groovy | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy index f17b7425a6..576b6fc08a 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -1,6 +1,5 @@ region='eu-west-2' tests=[] -clusters=[] release_versions="source/e2e-tests/release_versions" String getParam(String paramName, String keyName = null) { @@ -180,8 +179,6 @@ void clusterRunner(String cluster) { } void createCluster(String CLUSTER_SUFFIX) { - clusters.add("$CLUSTER_SUFFIX") - sh """ timestamp="\$(date +%s)" tee cluster-${CLUSTER_SUFFIX}.yaml << EOF From f5f81921eaa4e1d8925d6dbc1ffdc1f0ee9f006c Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Tue, 4 Feb 2025 14:02:36 +0200 Subject: [PATCH 12/24] Refactor Jenkins pipeline: change cluster creation logic to use boolean flag and add shutdownCluster calls in post steps for each cluster stage --- cloud/jenkins/pso-gke.groovy | 40 ++++++++++++------------------------ 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso-gke.groovy index 30a830ae04..52aac3583d 100644 --- a/cloud/jenkins/pso-gke.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -1,6 +1,5 @@ region='us-central1-a' tests=[] -clusters=[] release_versions="source/e2e-tests/release_versions" String getParam(String paramName, String keyName = null) { @@ -182,28 +181,22 @@ void initTests() { } void clusterRunner(String cluster) { - def clusterCreated=0 + def clusterCreated = false for (int i=0; i= 1) { - shutdownCluster(cluster) - } } void createCluster(String CLUSTER_SUFFIX) { - clusters.add("$CLUSTER_SUFFIX") - withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { sh """ export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX @@ -416,54 +409,49 @@ pipeline { } parallel { stage('cluster1') { - 
agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster1') } + post { always { script { shutdownCluster('cluster1') } } } } stage('cluster2') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster2') } + post { always { script { shutdownCluster('cluster2') } } } } stage('cluster3') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster3') } + post { always { script { shutdownCluster('cluster3') } } } } stage('cluster4') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster4') } + post { always { script { shutdownCluster('cluster4') } } } } stage('cluster5') { - agent { - label 'docker' - } + agent { label 'docker' } steps { prepareAgent() unstash "sourceFILES" clusterRunner('cluster5') } + post { always { script { shutdownCluster('cluster5') } } } } } } @@ -479,8 +467,6 @@ pipeline { if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" } - - clusters.each { shutdownCluster(it) } } sh """ From b8f4235e2e78f1a068b83635f5b51a8288b5c9ed Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 5 Feb 2025 10:55:41 +0200 Subject: [PATCH 13/24] Refactor Jenkins pipeline: remove redundant unstash calls, make minikube pipeline parallel --- cloud/jenkins/pso-eks.groovy | 13 +-- cloud/jenkins/pso-gke.groovy | 13 +-- cloud/jenkins/pso-minikube.groovy | 165 +++++++++++++++++++----------- 3 files changed, 109 insertions(+), 82 deletions(-) diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso-eks.groovy index 576b6fc08a..c0c82bda29 100644 --- a/cloud/jenkins/pso-eks.groovy +++ b/cloud/jenkins/pso-eks.groovy @@ -53,8 
+53,8 @@ void prepareSources() { """ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() } void initParams() { @@ -233,6 +233,7 @@ void runTest(Integer TEST_ID) { def testName = tests[TEST_ID]["name"] def clusterSuffix = tests[TEST_ID]["cluster"] + unstash "sourceFILES" waitUntil { def timeStart = new Date().getTime() try { @@ -267,6 +268,7 @@ void runTest(Integer TEST_ID) { return true } catch (exc) { + echo "Error occurred while running test $testName: $exc" if (retryCount >= 1) { currentBuild.result = 'FAILURE' return true @@ -425,7 +427,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster1') } post { always { script { shutdownCluster('cluster1') } } } @@ -434,7 +435,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster2') } post { always { script { shutdownCluster('cluster2') } } } @@ -443,7 +443,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster3') } post { always { script { shutdownCluster('cluster3') } } } @@ -452,7 +451,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster4') } post { always { script { shutdownCluster('cluster4') } } } @@ -472,11 +470,6 @@ pipeline { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build 
$currentBuild.result, $BUILD_URL" } } - - sh """ - sudo docker system prune --volumes -af - """ - deleteDir() } } } diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso-gke.groovy index 52aac3583d..9e4e5de092 100644 --- a/cloud/jenkins/pso-gke.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -47,7 +47,6 @@ EOF sudo yum install -y google-cloud-cli google-cloud-cli-gke-gcloud-auth-plugin """ - echo "=========================[ Logging in the Kubernetes provider ]=========================" withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { sh """ gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE @@ -247,6 +246,8 @@ void runTest(Integer TEST_ID) { def testName = tests[TEST_ID]["name"] def clusterSuffix = tests[TEST_ID]["cluster"] + unstash "sourceFILES" + waitUntil { def timeStart = new Date().getTime() try { @@ -412,7 +413,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster1') } post { always { script { shutdownCluster('cluster1') } } } @@ -421,7 +421,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster2') } post { always { script { shutdownCluster('cluster2') } } } @@ -430,7 +429,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster3') } post { always { script { shutdownCluster('cluster3') } } } @@ -439,7 +437,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster4') } post { always { script { shutdownCluster('cluster4') } } } @@ -448,7 +445,6 @@ pipeline { agent { label 'docker' } steps { prepareAgent() - unstash "sourceFILES" clusterRunner('cluster5') } post { always { script { shutdownCluster('cluster5') } } } @@ -468,11 +464,6 @@ pipeline { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: 
"[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" } } - - sh """ - sudo docker system prune --volumes -af - """ - deleteDir() } } } diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso-minikube.groovy index e27614856c..cbe2daa34a 100644 --- a/cloud/jenkins/pso-minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -13,7 +13,32 @@ String getParam(String paramName, String keyName = null) { return param } -void prepareNode() { +void prepareAgent() { + echo "=========================[ Installing tools on the Jenkins executor ]=========================" + sh """ + sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl + kubectl version --client --output=yaml + + curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm + + sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq + sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq + + curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - + ./krew-linux_amd64 install krew + export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" + + kubectl krew install assert + + # v0.17.0 kuttl version + kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml + echo \$(kubectl kuttl --version) is installed + + sudo curl -sLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x /usr/local/bin/minikube + """ +} + +void prepareSources() { echo "=========================[ Cloning the sources ]=========================" git 
branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ @@ -26,6 +51,11 @@ void prepareNode() { git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() +} + +void initParams() { if ("$PILLAR_VERSION" != "none") { echo "=========================[ Getting parameters for release test ]=========================" IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") @@ -37,44 +67,18 @@ void prepareNode() { IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") - if ("$PLATFORM_VER".toLowerCase() == "rel") { + if ("$PLATFORM_VER".toLowerCase() == "max") { PLATFORM_VER = getParam("PLATFORM_VER", "MINIKUBE_${PLATFORM_VER}") } } else { echo "=========================[ Not a release run. Using job params only! 
]=========================" } - echo "=========================[ Installing tools on the Jenkins executor ]=========================" - sh """ - sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl - kubectl version --client --output=yaml - - curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm - - sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq - sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq - - sudo curl -sLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x /usr/local/bin/minikube - - curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - - ./krew-linux_amd64 install krew - export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" - - kubectl krew install assert - - # v0.17.0 kuttl version - kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml - echo \$(kubectl kuttl --version) is installed - """ - if ("$IMAGE_MYSQL") { cw = ("$CLUSTER_WIDE" == "YES") ? 
"CW" : "NON-CW" currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH" currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw" } - - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() } void dockerBuildPush() { @@ -147,57 +151,71 @@ void initTests() { cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ } + stash includes: "source/**", name: "sourceFILES" } void clusterRunner(String cluster) { - sh """ - export CHANGE_MINIKUBE_NONE_USER=true - minikube start --kubernetes-version $PLATFORM_VER --cpus=6 --memory=28G - """ + def clusterCreated = false for (int i=0; i= 1) { currentBuild.result = 'FAILURE' return true @@ -256,7 +274,6 @@ void makeReport() { pipeline { environment { - CLEAN_NAMESPACE = 1 DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { @@ -283,11 +300,15 @@ pipeline { options { buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) skipDefaultCheckout() + disableConcurrentBuilds() + copyArtifactPermission('ps-operator-latest-scheduler'); } stages { stage('Prepare Node') { steps { - prepareNode() + prepareAgent() + prepareSources() + initParams() } } stage('Docker Build and Push') { @@ -304,8 +325,35 @@ pipeline { options { timeout(time: 3, unit: 'HOURS') } - steps { - clusterRunner('cluster1') + parallel { + stage('cluster1') { + agent { label 'docker' } + steps { + prepareAgent() + clusterRunner('cluster1') + } + } + stage('cluster2') { + agent { label 'docker' } + steps { + prepareAgent() + 
clusterRunner('cluster2') + } + } + stage('cluster3') { + agent { label 'docker' } + steps { + prepareAgent() + clusterRunner('cluster3') + } + } + stage('cluster4') { + agent { label 'docker' } + steps { + prepareAgent() + clusterRunner('cluster4') + } + } } } } @@ -321,11 +369,6 @@ pipeline { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" } } - - sh """ - minikube delete || true - """ - deleteDir() } } } From 671677b3bf6442bab43e4b97c7385c79d22a709b Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 5 Feb 2025 13:54:27 +0200 Subject: [PATCH 14/24] Refactor Jenkins pipeline: replace 'docker' agent with 'docker-32gb' for improved resource allocation --- cloud/jenkins/pso-minikube.groovy | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso-minikube.groovy index cbe2daa34a..f28b4636e2 100644 --- a/cloud/jenkins/pso-minikube.groovy +++ b/cloud/jenkins/pso-minikube.groovy @@ -306,7 +306,6 @@ pipeline { stages { stage('Prepare Node') { steps { - prepareAgent() prepareSources() initParams() } @@ -327,28 +326,28 @@ pipeline { } parallel { stage('cluster1') { - agent { label 'docker' } + agent { label 'docker-32gb' } steps { prepareAgent() clusterRunner('cluster1') } } stage('cluster2') { - agent { label 'docker' } + agent { label 'docker-32gb' } steps { prepareAgent() clusterRunner('cluster2') } } stage('cluster3') { - agent { label 'docker' } + agent { label 'docker-32gb' } steps { prepareAgent() clusterRunner('cluster3') } } stage('cluster4') { - agent { label 'docker' } + agent { label 'docker-32gb' } steps { prepareAgent() clusterRunner('cluster4') From 372aac1a46aa2d429ad7dd6db9490a88bc661fc5 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Wed, 5 Feb 2025 17:54:03 +0200 Subject: [PATCH 15/24] adjust GKE release channel filtering --- 
cloud/jenkins/pso-gke.groovy | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso-gke.groovy index 9e4e5de092..580c3b0681 100644 --- a/cloud/jenkins/pso-gke.groovy +++ b/cloud/jenkins/pso-gke.groovy @@ -47,7 +47,7 @@ EOF sudo yum install -y google-cloud-cli google-cloud-cli-gke-gcloud-auth-plugin """ - withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) { + withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) { sh """ gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE gcloud config set project $GCP_PROJECT @@ -96,7 +96,7 @@ void initParams() { } if ("$PLATFORM_VER" == "latest") { - PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", returnStdout: true).trim() + PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=$GKE_RELEASE_CHANNEL' --format='value(channels.validVersions)' | cut -d- -f1", returnStdout: true).trim() } if ("$IMAGE_MYSQL") { From ace5fc6cfc83443a5b936a77864939db29eabe90 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Fri, 7 Feb 2025 11:28:09 +0200 Subject: [PATCH 16/24] bringing back underscore (_) in groovy file naming, as per Eleonora's request --- cloud/jenkins/{job-pso-eks.yml => pso-eks.yml} | 2 +- cloud/jenkins/{job-pso-gke.yml => pso-gke.yml} | 2 +- cloud/jenkins/{job-pso-minikube.yml => pso-minikube.yml} | 2 +- cloud/jenkins/{pso-eks.groovy => pso_eks.groovy} | 0 cloud/jenkins/{pso-gke.groovy => pso_gke.groovy} | 0 cloud/jenkins/{pso-minikube.groovy => pso_minikube.groovy} | 0 
cloud/jenkins/{job-weekly-pso.yml => weekly-pso.yml} | 2 +- cloud/jenkins/{weekly-pso.groovy => weekly_pso.groovy} | 0 8 files changed, 4 insertions(+), 4 deletions(-) rename cloud/jenkins/{job-pso-eks.yml => pso-eks.yml} (87%) rename cloud/jenkins/{job-pso-gke.yml => pso-gke.yml} (91%) rename cloud/jenkins/{job-pso-minikube.yml => pso-minikube.yml} (90%) rename cloud/jenkins/{pso-eks.groovy => pso_eks.groovy} (100%) rename cloud/jenkins/{pso-gke.groovy => pso_gke.groovy} (100%) rename cloud/jenkins/{pso-minikube.groovy => pso_minikube.groovy} (100%) rename cloud/jenkins/{job-weekly-pso.yml => weekly-pso.yml} (88%) rename cloud/jenkins/{weekly-pso.groovy => weekly_pso.groovy} (100%) diff --git a/cloud/jenkins/job-pso-eks.yml b/cloud/jenkins/pso-eks.yml similarity index 87% rename from cloud/jenkins/job-pso-eks.yml rename to cloud/jenkins/pso-eks.yml index 39cb839ae7..66dbeea465 100644 --- a/cloud/jenkins/job-pso-eks.yml +++ b/cloud/jenkins/pso-eks.yml @@ -11,4 +11,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/pso-eks.groovy + script-path: cloud/jenkins/pso_eks.groovy diff --git a/cloud/jenkins/job-pso-gke.yml b/cloud/jenkins/pso-gke.yml similarity index 91% rename from cloud/jenkins/job-pso-gke.yml rename to cloud/jenkins/pso-gke.yml index 8ebd0d4d82..df166f36dc 100644 --- a/cloud/jenkins/job-pso-gke.yml +++ b/cloud/jenkins/pso-gke.yml @@ -18,4 +18,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/pso-gke.groovy + script-path: cloud/jenkins/pso_gke.groovy diff --git a/cloud/jenkins/job-pso-minikube.yml b/cloud/jenkins/pso-minikube.yml similarity index 90% rename from cloud/jenkins/job-pso-minikube.yml rename to cloud/jenkins/pso-minikube.yml index 41ead27a33..1e9d28338d 100644 --- a/cloud/jenkins/job-pso-minikube.yml +++ b/cloud/jenkins/pso-minikube.yml @@ -18,4 +18,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/pso-minikube.groovy + 
script-path: cloud/jenkins/pso_minikube.groovy diff --git a/cloud/jenkins/pso-eks.groovy b/cloud/jenkins/pso_eks.groovy similarity index 100% rename from cloud/jenkins/pso-eks.groovy rename to cloud/jenkins/pso_eks.groovy diff --git a/cloud/jenkins/pso-gke.groovy b/cloud/jenkins/pso_gke.groovy similarity index 100% rename from cloud/jenkins/pso-gke.groovy rename to cloud/jenkins/pso_gke.groovy diff --git a/cloud/jenkins/pso-minikube.groovy b/cloud/jenkins/pso_minikube.groovy similarity index 100% rename from cloud/jenkins/pso-minikube.groovy rename to cloud/jenkins/pso_minikube.groovy diff --git a/cloud/jenkins/job-weekly-pso.yml b/cloud/jenkins/weekly-pso.yml similarity index 88% rename from cloud/jenkins/job-weekly-pso.yml rename to cloud/jenkins/weekly-pso.yml index a5e95f9ea6..8715ce89f4 100644 --- a/cloud/jenkins/job-weekly-pso.yml +++ b/cloud/jenkins/weekly-pso.yml @@ -13,4 +13,4 @@ - 'master' wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/weekly-pso.groovy \ No newline at end of file + script-path: cloud/jenkins/weekly_pso.groovy \ No newline at end of file diff --git a/cloud/jenkins/weekly-pso.groovy b/cloud/jenkins/weekly_pso.groovy similarity index 100% rename from cloud/jenkins/weekly-pso.groovy rename to cloud/jenkins/weekly_pso.groovy From 1299abca9a2b417f1dfda1e89a836015ed9ce909 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 17:24:37 +0200 Subject: [PATCH 17/24] Fix: Remove unnecessary line break and add error logging in test execution --- .../ps_operator_aws_openshift-4.groovy | 325 ------------ cloud/jenkins/pso_eks.groovy | 2 +- cloud/jenkins/pso_gke.groovy | 2 +- cloud/jenkins/pso_openshift.groovy | 478 ++++++++++++++++++ 4 files changed, 480 insertions(+), 327 deletions(-) delete mode 100644 cloud/jenkins/ps_operator_aws_openshift-4.groovy create mode 100644 cloud/jenkins/pso_openshift.groovy diff --git a/cloud/jenkins/ps_operator_aws_openshift-4.groovy 
b/cloud/jenkins/ps_operator_aws_openshift-4.groovy deleted file mode 100644 index a23f82069e..0000000000 --- a/cloud/jenkins/ps_operator_aws_openshift-4.groovy +++ /dev/null @@ -1,325 +0,0 @@ -void pushArtifactFile(String FILE_NAME) { - echo "Push $FILE_NAME file to S3!" - - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - sh """ - touch ${FILE_NAME} - S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source rev-parse --short HEAD) - aws s3 ls \$S3_PATH/${FILE_NAME} || : - aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || : - """ - } -} - -void popArtifactFile(String FILE_NAME) { - echo "Try to get $FILE_NAME file from S3!" - - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - sh """ - S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source rev-parse --short HEAD) - aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || : - """ - } -} - -TestsReport = '\n' -testsReportMap = [:] -void makeReport() { - for ( test in testsReportMap ) { - TestsReport = TestsReport + "<${test.value}/>\n" - } - TestsReport = TestsReport + '\n' -} - -void runTest(String TEST_NAME) { - def retryCount = 0 - waitUntil { - try { - echo "The $TEST_NAME test was started!" - testsReportMap[TEST_NAME] = 'failure' - - def FILE_NAME = "$GIT_BRANCH-$GIT_SHORT_COMMIT-$TEST_NAME-eks-$PLATFORM_VER-$PARAMS_HASH" - popArtifactFile("$FILE_NAME") - - timeout(time: 90, unit: 'MINUTES') { - sh """ - if [ -f "$FILE_NAME" ]; then - echo "Skipping $TEST_NAME test because it passed in previous run." 
- else - cd source - - [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH - export IMAGE_MYSQL=$IMAGE_MYSQL - export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR - export IMAGE_ROUTER=$IMAGE_ROUTER - export IMAGE_HAPROXY=$IMAGE_HAPROXY - export IMAGE_BACKUP=$IMAGE_BACKUP - export IMAGE_TOOLKIT=$IMAGE_TOOLKIT - export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT - export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER - - export PATH="${HOME}/.krew/bin:$PATH" - source $HOME/google-cloud-sdk/path.bash.inc - export KUBECONFIG=$WORKSPACE/openshift/auth/kubeconfig - oc whoami - - kubectl kuttl test --config ./e2e-tests/kuttl.yaml --test "^${TEST_NAME}\$" - fi - """ - } - pushArtifactFile("$FILE_NAME") - testsReportMap[TEST_NAME] = 'passed' - return true - } - catch (exc) { - if (retryCount >= 2) { - currentBuild.result = 'FAILURE' - return true - } - retryCount++ - return false - } - } - - echo "The $TEST_NAME test was finished!" -} - -void conditionalRunTest(String TEST_NAME) { - if ( TEST_NAME == 'default-cr' ) { - if ( params.GIT_BRANCH.contains('release-') ) { - runTest(TEST_NAME) - } - return 0 - } - runTest(TEST_NAME) -} - -void installRpms() { - sh """ - sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true - sudo percona-release enable-only tools - """ -} -pipeline { - parameters { - string( - defaultValue: '4.7.22', - description: 'OpenShift version to use', - name: 'PLATFORM_VER') - string( - defaultValue: 'main', - description: 'Tag/Branch for percona/percona-server-mysql-operator repository', - name: 'GIT_BRANCH') - string( - defaultValue: 'https://github.com/percona/percona-server-mysql-operator', - description: 'percona-server-mysql-operator repository', - name: 'GIT_REPO') - string( - defaultValue: '', - description: 'Operator image: perconalab/percona-server-mysql-operator:main', - name: 'OPERATOR_IMAGE') - string( - defaultValue: '', - description: 'PS for MySQL image: 
perconalab/percona-server-mysql-operator:main-ps8.0', - name: 'IMAGE_MYSQL') - string( - defaultValue: '', - description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator', - name: 'IMAGE_ORCHESTRATOR') - string( - defaultValue: '', - description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router', - name: 'IMAGE_ROUTER') - string( - defaultValue: '', - description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup', - name: 'IMAGE_BACKUP') - string( - defaultValue: '', - description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit', - name: 'IMAGE_TOOLKIT') - string( - defaultValue: '', - description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy', - name: 'IMAGE_HAPROXY') - string( - defaultValue: '', - description: 'PMM client image: perconalab/pmm-client:dev-latest', - name: 'IMAGE_PMM_CLIENT') - string( - defaultValue: '', - description: 'PMM server image: perconalab/pmm-server:dev-latest', - name: 'IMAGE_PMM_SERVER') - } - environment { - TF_IN_AUTOMATION = 'true' - CLEAN_NAMESPACE = 1 - } - agent { - label 'docker' - } - options { - buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) - skipDefaultCheckout() - disableConcurrentBuilds() - } - - stages { - stage('Prepare') { - steps { - sh """ - wget https://releases.hashicorp.com/terraform/0.11.14/terraform_0.11.14_linux_amd64.zip - unzip -o terraform_0.11.14_linux_amd64.zip - sudo mv terraform /usr/local/bin/ && rm terraform_0.11.14_linux_amd64.zip - """ - installRpms() - sh ''' - if [ ! 
-d $HOME/google-cloud-sdk/bin ]; then - rm -rf $HOME/google-cloud-sdk - curl https://sdk.cloud.google.com | bash - fi - - source $HOME/google-cloud-sdk/path.bash.inc - gcloud components update kubectl - gcloud version - - curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \ - | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf - - - sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/v4.34.1/yq_linux_amd64 > /usr/local/bin/yq" - sudo chmod +x /usr/local/bin/yq - sudo sh -c "curl -s -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 > /usr/local/bin/jq" - sudo chmod +x /usr/local/bin/jq - - curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$PLATFORM_VER/openshift-client-linux-$PLATFORM_VER.tar.gz \ - | sudo tar -C /usr/local/bin --wildcards -zxvpf - - curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$PLATFORM_VER/openshift-install-linux-$PLATFORM_VER.tar.gz \ - | sudo tar -C /usr/local/bin --wildcards -zxvpf - - - cd "$(mktemp -d)" - OS="$(uname | tr '[:upper:]' '[:lower:]')" - ARCH="$(uname -m | sed -e 's/x86_64/amd64/')" - KREW="krew-${OS}_${ARCH}" - curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/download/v0.4.2/${KREW}.tar.gz" - tar zxvf "${KREW}.tar.gz" - ./"${KREW}" install krew - - export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" - - kubectl krew install kuttl - kubectl krew install assert - ''' - - } - } - stage('Build docker image') { - steps { - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' - withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER'), file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { - sh ''' - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source - ./cloud/local/checkout $GIT_REPO $GIT_BRANCH - - cp $CLOUD_SECRET_FILE 
./source/e2e-tests/conf/cloud-secret.yml - - if [[ "$OPERATOR_IMAGE" ]]; then - echo "SKIP: Build is not needed, operator image was set!" - else - cd ./source/ - sg docker -c " - docker login -u '${USER}' -p '${PASS}' - export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH - ./e2e-tests/build - docker logout - " - sudo rm -rf ./build - fi - ''' - } - } - } - stage('Create AWS Infrastructure') { - steps { - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift4-secret-file', variable: 'OPENSHIFT_CONF_FILE')]) { - sh """ - mkdir openshift - cp $OPENSHIFT_CONF_FILE ./openshift/install-config.yaml - sed -i 's/pxc/ps/g' ./openshift/install-config.yaml - """ - sshagent(['aws-openshift-41-key']) { - sh """ - /usr/local/bin/openshift-install create cluster --dir=./openshift/ - """ - } - } - - } - } - stage('E2E Basic Tests') { - environment { - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim() - PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim() - } - options { - timeout(time: 3, unit: 'HOURS') - } - steps { - runTest('async-ignore-annotations') - runTest('auto-config') - runTest('config') - runTest('config-router') - runTest('demand-backup') - runTest('gr-demand-backup') - runTest('gr-ignore-annotations') - runTest('gr-init-deploy') - runTest('gr-one-pod') - runTest('gr-scaling') - runTest('gr-tls-cert-manager') - runTest('haproxy') - runTest('init-deploy') - runTest('limits') - runTest('monitoring') - runTest('one-pod') - runTest('scaling') - runTest('semi-sync') - runTest('service-per-pod') - 
runTest('sidecars') - runTest('tls-cert-manager') - runTest('users') - runTest('version-service') - } - } - stage('Make report') { - steps { - makeReport() - sh """ - echo "${TestsReport}" > TestsReport.xml - """ - step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) - archiveArtifacts '*.xml' - } - } - } - - post { - always { - withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) { - sshagent(['aws-openshift-41-key']) { - sh """ - /usr/local/bin/openshift-install destroy cluster --dir=./openshift/ - """ - } - } - - sh ''' - sudo docker rmi -f \$(sudo docker images -q) || true - sudo rm -rf $HOME/google-cloud-sdk - sudo rm -rf ./* - ''' - deleteDir() - } - } -} diff --git a/cloud/jenkins/pso_eks.groovy b/cloud/jenkins/pso_eks.groovy index c0c82bda29..795967a6e2 100644 --- a/cloud/jenkins/pso_eks.groovy +++ b/cloud/jenkins/pso_eks.groovy @@ -54,7 +54,7 @@ void prepareSources() { GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() } void initParams() { diff --git a/cloud/jenkins/pso_gke.groovy b/cloud/jenkins/pso_gke.groovy index 580c3b0681..520c2b596f 100644 --- a/cloud/jenkins/pso_gke.groovy +++ b/cloud/jenkins/pso_gke.groovy 
@@ -247,7 +247,6 @@ void runTest(Integer TEST_ID) { def clusterSuffix = tests[TEST_ID]["cluster"] unstash "sourceFILES" - waitUntil { def timeStart = new Date().getTime() try { @@ -280,6 +279,7 @@ void runTest(Integer TEST_ID) { return true } catch (exc) { + echo "Error occurred while running test $testName: $exc" if (retryCount >= 1) { currentBuild.result = 'FAILURE' return true diff --git a/cloud/jenkins/pso_openshift.groovy b/cloud/jenkins/pso_openshift.groovy new file mode 100644 index 0000000000..90ec8b2289 --- /dev/null +++ b/cloud/jenkins/pso_openshift.groovy @@ -0,0 +1,478 @@ +region='eu-west-2' +tests=[] +clusters=[] +release_versions="source/e2e-tests/release_versions" + +String getParam(String paramName, String keyName = null) { + keyName = keyName ?: paramName + + param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim() + if ("$param") { + echo "$paramName=$param (from params file)" + } else { + error("$keyName not found in params file $release_versions") + } + return param +} + +void prepareAgent() { + echo "=========================[ Installing tools on the Jenkins executor ]=========================" + sh """ + sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl + kubectl version --client --output=yaml + + curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm + + sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq + sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq + + curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf - + 
./krew-linux_amd64 install krew + export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" + + kubectl krew install assert + + # v0.17.0 kuttl version + kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml + echo \$(kubectl kuttl --version) is installed + + curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OC_VER/openshift-client-linux.tar.gz | sudo tar -C /usr/local/bin -xzf - oc + curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$PLATFORM_VER/openshift-install-linux.tar.gz | sudo tar -C /usr/local/bin -xzf - openshift-install + """ +} + +void prepareSources() { + echo "=========================[ Cloning the sources ]=========================" + git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' + sh """ + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source + """ + + GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() + PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() +} + +void initParams() { + if ("$PILLAR_VERSION" != "none") { + echo "=========================[ Getting parameters for release test ]=========================" + IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR") + IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}") + IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}") + IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}") + IMAGE_HAPROXY 
= IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY") + IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR") + IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT") + IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT") + IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER") + if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") { + PLATFORM_VER = getParam("PLATFORM_VER", "OPENSHIFT_${PLATFORM_VER}") + } + } else { + echo "=========================[ Not a release run. Using job params only! ]=========================" + } + + if ("$PLATFORM_VER" == "latest") { + OC_VER = "4.15.25" + PLATFORM_VER = sh(script: "curl -s https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/$PLATFORM_VER/release.txt | sed -n 's/^\\s*Version:\\s\\+\\(\\S\\+\\)\\s*\$/\\1/p'", returnStdout: true).trim() + } else { + if ("$PLATFORM_VER" <= "4.15.25") { + OC_VER="$PLATFORM_VER" + } else { + OC_VER="4.15.25" + } + } + echo "OC_VER=$OC_VER" + + if ("$IMAGE_MYSQL") { + cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW" + currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH" + currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw" + } +} + +void dockerBuildPush() { + echo "=========================[ Building and Pushing the operator Docker image ]=========================" + withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) { + sh """ + if [[ "$IMAGE_OPERATOR" ]]; then + echo "SKIP: Build is not needed, operator image was set!" 
+ else + cd source + sg docker -c " + docker login -u '$USER' -p '$PASS' + export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH + e2e-tests/build + docker logout + " + sudo rm -rf build + fi + """ + } +} + +void initTests() { + echo "=========================[ Initializing the tests ]=========================" + + echo "Populating tests into the tests array!" + def testList = "$TEST_LIST" + def suiteFileName = "source/e2e-tests/$TEST_SUITE" + + if (testList.length() != 0) { + suiteFileName = 'source/e2e-tests/run-custom.csv' + sh """ + echo -e "$testList" > $suiteFileName + echo "Custom test suite contains following tests:" + cat $suiteFileName + """ + } + + def records = readCSV file: suiteFileName + + for (int i=0; i/dev/null 2>&1", returnStatus: true) + + if (retFileExists == 0) { + tests[i]["result"] = "passed" + } + } + } else { + sh """ + aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || : + """ + } + } + + withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { + sh """ + cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE source/e2e-tests/conf/cloud-secret-minio-gw.yml + """ + } + stash includes: "source/**", name: "sourceFILES" +} + +void clusterRunner(String cluster) { + def clusterCreated=0 + + for (int i=0; i= 1) { + shutdownCluster(cluster) + } +} + +void createCluster(String CLUSTER_SUFFIX) { + clusters.add("$CLUSTER_SUFFIX") + + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift4-secrets', variable: 'OPENSHIFT_CONF_FILE')]) { + sh """ + mkdir -p openshift/$CLUSTER_SUFFIX + timestamp="\$(date +%s)" +tee 
openshift/$CLUSTER_SUFFIX/install-config.yaml << EOF +additionalTrustBundlePolicy: Proxyonly +credentialsMode: Mint +apiVersion: v1 +baseDomain: cd.percona.com +compute: +- architecture: amd64 + hyperthreading: Enabled + name: worker + platform: + aws: + type: m5.2xlarge + replicas: 3 +controlPlane: + architecture: amd64 + hyperthreading: Enabled + name: master + platform: {} + replicas: 1 +metadata: + creationTimestamp: null + name: $CLUSTER_NAME-$CLUSTER_SUFFIX +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + machineNetwork: + - cidr: 10.0.0.0/16 + networkType: OVNKubernetes + serviceNetwork: + - 172.30.0.0/16 +platform: + aws: + region: $region + userTags: + iit-billing-tag: openshift + delete-cluster-after-hours: 8 + team: cloud + product: ps-operator + creation-time: \$timestamp + +publish: External +EOF + cat $OPENSHIFT_CONF_FILE >> openshift/$CLUSTER_SUFFIX/install-config.yaml + """ + + sshagent(['aws-openshift-41-key']) { + sh """ + /usr/local/bin/openshift-install create cluster --dir=openshift/$CLUSTER_SUFFIX + export KUBECONFIG=openshift/$CLUSTER_SUFFIX/auth/kubeconfig + """ + } + } +} + +void runTest(Integer TEST_ID) { + def retryCount = 0 + def testName = tests[TEST_ID]["name"] + def clusterSuffix = tests[TEST_ID]["cluster"] + + waitUntil { + def timeStart = new Date().getTime() + try { + echo "The $testName test was started on cluster $CLUSTER_NAME-$clusterSuffix !" 
+ tests[TEST_ID]["result"] = "failure" + + timeout(time: 90, unit: 'MINUTES') { + sh """ + cd source + + export DEBUG_TESTS=1 + [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator + export IMAGE=$IMAGE_OPERATOR + export IMAGE_MYSQL=$IMAGE_MYSQL + export IMAGE_BACKUP=$IMAGE_BACKUP + export IMAGE_ROUTER=$IMAGE_ROUTER + export IMAGE_HAPROXY=$IMAGE_HAPROXY + export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + export IMAGE_TOOLKIT=$IMAGE_TOOLKIT + export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT + export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER + export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix + export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH" + + kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$" + """ + } + pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$PLATFORM_VER-$DB_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH") + tests[TEST_ID]["result"] = "passed" + return true + } + catch (exc) { + echo "Error occurred while running test $testName: $exc" + if (retryCount >= 1) { + currentBuild.result = 'FAILURE' + return true + } + retryCount++ + return false + } + finally { + def timeStop = new Date().getTime() + def durationSec = (timeStop - timeStart) / 1000 + tests[TEST_ID]["time"] = durationSec + echo "The $testName test was finished!" + } + } +} + +void pushArtifactFile(String FILE_NAME) { + echo "Push $FILE_NAME file to S3!" 
+ + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { + sh """ + touch $FILE_NAME + S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT + aws s3 ls \$S3_PATH/$FILE_NAME || : + aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || : + """ + } +} + +void makeReport() { + echo "=========================[ Generating Test Report ]=========================" + testsReport = "\n" + for (int i = 0; i < tests.size(); i ++) { + testsReport += '<'+ tests[i]["result"] +'/>\n' + } + testsReport += '\n' + + echo "=========================[ Generating Parameters Report ]=========================" + pipelineParameters = """ + testsuite name=$JOB_NAME + IMAGE_OPERATOR=$IMAGE_OPERATOR + IMAGE_MYSQL=$IMAGE_MYSQL + IMAGE_BACKUP=$IMAGE_BACKUP + IMAGE_ROUTER=$IMAGE_ROUTER + IMAGE_HAPROXY=$IMAGE_HAPROXY + IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR + IMAGE_TOOLKIT=$IMAGE_TOOLKIT + IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT + IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER + PLATFORM_VER=$PLATFORM_VER + """ + + writeFile file: "TestsReport.xml", text: testsReport + writeFile file: 'PipelineParameters.txt', text: pipelineParameters +} + +void shutdownCluster(String CLUSTER_SUFFIX) { + withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) { + sshagent(['aws-openshift-41-key']) { + sh """ + export KUBECONFIG=$WORKSPACE/openshift/$CLUSTER_SUFFIX/auth/kubeconfig + for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do + kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true + kubectl delete sts --all -n 
\$namespace --force --grace-period=0 || true + kubectl delete replicasets --all -n \$namespace --force --grace-period=0 || true + kubectl delete poddisruptionbudget --all -n \$namespace --force --grace-period=0 || true + kubectl delete services --all -n \$namespace --force --grace-period=0 || true + kubectl delete pods --all -n \$namespace --force --grace-period=0 || true + done + kubectl get svc --all-namespaces || true + /usr/local/bin/openshift-install destroy cluster --dir=openshift/$CLUSTER_SUFFIX || true + """ + } + } +} + +pipeline { + environment { + DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() + } + parameters { + choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') + text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line') + choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)') + choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.') + string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository') + string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'OpenShift kubernetes version. 
If set to min or max, value will be automatically taken from release_versions file.') + choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode') + string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main') + string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql') + string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup') + string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router') + string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy') + string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator') + string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit') + string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest') + string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest') + } + agent { + label 'docker' + } + options { + buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) + skipDefaultCheckout() + disableConcurrentBuilds() + copyArtifactPermission('pgo-weekly'); + } + stages { + stage('Prepare Node') { + steps { + script { deleteDir() } + prepareSources() + initParams() + prepareAgent() + } + } + stage('Docker Build and Push') { + steps { + dockerBuildPush() + } + } + stage('Init Tests') { + steps { + initTests() + } + } + stage('Run Tests') { + options { + timeout(time: 3, unit: 'HOURS') + } + parallel { + stage('cluster1') { + agent { + label 'docker' + } + steps { + prepareAgent() + unstash "sourceFILES" + 
clusterRunner('c1') + } + } + stage('cluster2') { + agent { + label 'docker' + } + steps { + prepareAgent() + unstash "sourceFILES" + clusterRunner('c2') + } + } + stage('cluster3') { + agent { + label 'docker' + } + steps { + prepareAgent() + unstash "sourceFILES" + clusterRunner('c3') + } + } + stage('cluster4') { + agent { + label 'docker' + } + steps { + prepareAgent() + unstash "sourceFILES" + clusterRunner('c4') + } + } + } + } + } + post { + always { + echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","") + makeReport() + step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0]) + archiveArtifacts '*.xml,*.txt' + + script { + if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { + slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" + } + + clusters.each { shutdownCluster(it) } + } + } + } +} From cbc04cd0f7ec28a753ebb93b6fc3709ff7337293 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 18:00:25 +0200 Subject: [PATCH 18/24] Add new Jenkins pipeline jobs for pso-eks-2, pso-gke-2, pso-os-2, and pso-os with build discarder properties --- ...ator-aws-openshift-4.yml => pso-eks-2.yml} | 4 ++-- cloud/jenkins/pso-gke-2.yml | 21 +++++++++++++++++++ cloud/jenkins/pso-openshift-2.yml | 21 +++++++++++++++++++ cloud/jenkins/pso-openshift.yml | 21 +++++++++++++++++++ 4 files changed, 65 insertions(+), 2 deletions(-) rename cloud/jenkins/{ps-operator-aws-openshift-4.yml => pso-eks-2.yml} (75%) create mode 100644 cloud/jenkins/pso-gke-2.yml create mode 100644 cloud/jenkins/pso-openshift-2.yml create mode 100644 cloud/jenkins/pso-openshift.yml diff --git a/cloud/jenkins/ps-operator-aws-openshift-4.yml b/cloud/jenkins/pso-eks-2.yml similarity index 75% rename from cloud/jenkins/ps-operator-aws-openshift-4.yml rename to cloud/jenkins/pso-eks-2.yml index 
120dd25833..aa85b5b5a4 100644 --- a/cloud/jenkins/ps-operator-aws-openshift-4.yml +++ b/cloud/jenkins/pso-eks-2.yml @@ -1,5 +1,5 @@ - job: - name: ps-operator-aws-openshift-4 + name: pso-eks-2 project-type: pipeline description: | Do not edit this job through the web! @@ -11,4 +11,4 @@ - master wipe-workspace: false lightweight-checkout: true - script-path: cloud/jenkins/ps_operator_aws_openshift-4.groovy + script-path: cloud/jenkins/pso_eks.groovy diff --git a/cloud/jenkins/pso-gke-2.yml b/cloud/jenkins/pso-gke-2.yml new file mode 100644 index 0000000000..3171805ec9 --- /dev/null +++ b/cloud/jenkins/pso-gke-2.yml @@ -0,0 +1,21 @@ +- job: + name: pso-gke-2 + project-type: pipeline + description: | + Do not edit this job through the web! + concurrent: false + properties: + - build-discarder: + days-to-keep: -1 + num-to-keep: 10 + artifact-days-to-keep: -1 + artifact-num-to-keep: 10 + pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - master + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pso_gke.groovy diff --git a/cloud/jenkins/pso-openshift-2.yml b/cloud/jenkins/pso-openshift-2.yml new file mode 100644 index 0000000000..786dcb9cfd --- /dev/null +++ b/cloud/jenkins/pso-openshift-2.yml @@ -0,0 +1,21 @@ +- job: + name: pso-os-2 + project-type: pipeline + description: | + Do not edit this job through the web! 
+ concurrent: false + properties: + - build-discarder: + days-to-keep: -1 + num-to-keep: 10 + artifact-days-to-keep: -1 + artifact-num-to-keep: 10 + pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - master + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pso_openshift.groovy diff --git a/cloud/jenkins/pso-openshift.yml b/cloud/jenkins/pso-openshift.yml new file mode 100644 index 0000000000..3bb9a983c3 --- /dev/null +++ b/cloud/jenkins/pso-openshift.yml @@ -0,0 +1,21 @@ +- job: + name: pso-os + project-type: pipeline + description: | + Do not edit this job through the web! + concurrent: false + properties: + - build-discarder: + days-to-keep: -1 + num-to-keep: 10 + artifact-days-to-keep: -1 + artifact-num-to-keep: 10 + pipeline-scm: + scm: + - git: + url: https://github.com/Percona-Lab/jenkins-pipelines.git + branches: + - master + wipe-workspace: false + lightweight-checkout: true + script-path: cloud/jenkins/pso_openshift.groovy From 2556c359060463c584e245fffa8c8d41034c1937 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 18:01:17 +0200 Subject: [PATCH 19/24] Add stage to trigger 'pso-os' job multiple times and copy artifacts from 'pso-os' --- cloud/jenkins/weekly_pso.groovy | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/jenkins/weekly_pso.groovy b/cloud/jenkins/weekly_pso.groovy index 0852ea0a03..dc5015cc05 100644 --- a/cloud/jenkins/weekly_pso.groovy +++ b/cloud/jenkins/weekly_pso.groovy @@ -27,6 +27,11 @@ pipeline { triggerJobMultiple("pso-eks") } } + stage('Trigger psmo-os job 3 times') { + steps { + triggerJobMultiple("pso-os") + } + } } } } @@ -34,6 +39,7 @@ pipeline { always { copyArtifacts(projectName: 'pso-gke', selector: lastCompleted(), target: 'pso-gke') copyArtifacts(projectName: 'pso-eks', selector: lastCompleted(), target: 'pso-eks') + copyArtifacts(projectName: 'pso-os', selector: 
lastCompleted(), target: 'pso-os') archiveArtifacts '*/*.xml' step([$class: 'JUnitResultArchiver', testResults: '*/*.xml', healthScaleFactor: 1.0]) } From c4bcced65a58368f9477ce9a0736476c048f4370 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 19:16:24 +0200 Subject: [PATCH 20/24] Fix: Remove extra comma in AWS CLI command for platform version retrieval --- cloud/jenkins/pso_eks.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/jenkins/pso_eks.groovy b/cloud/jenkins/pso_eks.groovy index 795967a6e2..459f9ddcce 100644 --- a/cloud/jenkins/pso_eks.groovy +++ b/cloud/jenkins/pso_eks.groovy @@ -78,7 +78,7 @@ void initParams() { if ("$PLATFORM_VER" == "latest") { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) { - PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim() + PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", returnStdout: true).trim() } } From 9df778cb5ef117145d082573d4b67754d085e478 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 19:46:34 +0200 Subject: [PATCH 21/24] Fix: Correct stage names and update credential handling in Jenkins pipeline scripts --- cloud/jenkins/pso_eks.groovy | 31 +++++++++++----------- cloud/jenkins/pso_gke.groovy | 41 +++++++++++++----------------- cloud/jenkins/pso_minikube.groovy | 39 ++++------------------------ cloud/jenkins/pso_openshift.groovy | 21 +++++---------- cloud/jenkins/weekly_pso.groovy | 4 +-- 5 files changed, 46 insertions(+), 90 
deletions(-) diff --git a/cloud/jenkins/pso_eks.groovy b/cloud/jenkins/pso_eks.groovy index 459f9ddcce..690de7302c 100644 --- a/cloud/jenkins/pso_eks.groovy +++ b/cloud/jenkins/pso_eks.groovy @@ -1,5 +1,6 @@ region='eu-west-2' tests=[] +clusters=[] release_versions="source/e2e-tests/release_versions" String getParam(String paramName, String keyName = null) { @@ -43,12 +44,6 @@ void prepareSources() { echo "=========================[ Cloning the sources ]=========================" git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ @@ -163,19 +158,23 @@ void initTests() { } void clusterRunner(String cluster) { - def clusterCreated = false + def clusterCreated=0 for (int i=0; i= 1) { + shutdownCluster(cluster) + } } void createCluster(String CLUSTER_SUFFIX) { @@ -233,7 +232,6 @@ void runTest(Integer TEST_ID) { def testName = tests[TEST_ID]["name"] def clusterSuffix = tests[TEST_ID]["cluster"] - unstash "sourceFILES" waitUntil { def timeStart = new Date().getTime() try { @@ -398,11 +396,12 @@ pipeline { buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) skipDefaultCheckout() disableConcurrentBuilds() - copyArtifactPermission('ps-operator-latest-scheduler'); + copyArtifactPermission('weekly-pso'); } stages { stage('Prepare Node') { steps { + script { deleteDir() } prepareAgent() prepareSources() initParams() @@ -427,33 +426,33 @@ pipeline { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster1') } - post { always { script { shutdownCluster('cluster1') } } } 
} stage('cluster2') { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster2') } - post { always { script { shutdownCluster('cluster2') } } } } stage('cluster3') { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster3') } - post { always { script { shutdownCluster('cluster3') } } } } stage('cluster4') { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster4') } - post { always { script { shutdownCluster('cluster4') } } } } } } @@ -469,6 +468,8 @@ pipeline { if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" } + + clusters.each { shutdownCluster(it) } } } } diff --git a/cloud/jenkins/pso_gke.groovy b/cloud/jenkins/pso_gke.groovy index 520c2b596f..0af767f0a9 100644 --- a/cloud/jenkins/pso_gke.groovy +++ b/cloud/jenkins/pso_gke.groovy @@ -1,5 +1,6 @@ region='us-central1-a' tests=[] +clusters=[] release_versions="source/e2e-tests/release_versions" String getParam(String paramName, String keyName = null) { @@ -59,18 +60,12 @@ void prepareSources() { echo "=========================[ Cloning the sources ]=========================" git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() - CLUSTER_NAME = sh(script: "echo jenkins-$JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() PARAMS_HASH = 
sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() + CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() } void initParams() { @@ -180,19 +175,23 @@ void initTests() { } void clusterRunner(String cluster) { - def clusterCreated = false + def clusterCreated=0 for (int i=0; i= 1) { + shutdownCluster(cluster) + } } void createCluster(String CLUSTER_SUFFIX) { @@ -246,7 +245,6 @@ void runTest(Integer TEST_ID) { def testName = tests[TEST_ID]["name"] def clusterSuffix = tests[TEST_ID]["cluster"] - unstash "sourceFILES" waitUntil { def timeStart = new Date().getTime() try { @@ -384,11 +382,12 @@ pipeline { buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30')) skipDefaultCheckout() disableConcurrentBuilds() - copyArtifactPermission('ps-operator-latest-scheduler'); + copyArtifactPermission('weekly-pso'); } stages { stage('Prepare Node') { steps { + script { deleteDir() } prepareAgent() prepareSources() initParams() @@ -413,41 +412,33 @@ pipeline { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster1') } - post { always { script { shutdownCluster('cluster1') } } } } stage('cluster2') { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster2') } - post { always { script { shutdownCluster('cluster2') } } } } stage('cluster3') { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster3') } - post { always { script { shutdownCluster('cluster3') } } } } stage('cluster4') { agent { label 'docker' } steps { prepareAgent() + unstash "sourceFILES" clusterRunner('cluster4') } - post { always { 
script { shutdownCluster('cluster4') } } } - } - stage('cluster5') { - agent { label 'docker' } - steps { - prepareAgent() - clusterRunner('cluster5') - } - post { always { script { shutdownCluster('cluster5') } } } } } } @@ -463,6 +454,8 @@ pipeline { if (currentBuild.result != null && currentBuild.result != 'SUCCESS') { slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL" } + + clusters.each { shutdownCluster(it) } } } } diff --git a/cloud/jenkins/pso_minikube.groovy b/cloud/jenkins/pso_minikube.groovy index f28b4636e2..f842f78fc3 100644 --- a/cloud/jenkins/pso_minikube.groovy +++ b/cloud/jenkins/pso_minikube.groovy @@ -42,12 +42,6 @@ void prepareSources() { echo "=========================[ Cloning the sources ]=========================" git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ - # sudo is needed for better node recovery after compilation failure - # if building failed on compilation stage directory will have files owned by docker user - sudo git config --global --add safe.directory '*' - sudo git reset --hard - sudo git clean -xdf - sudo rm -rf source git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ @@ -151,19 +145,18 @@ void initTests() { cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml """ } - stash includes: "source/**", name: "sourceFILES" } void clusterRunner(String cluster) { - def clusterCreated = false + def clusterCreated=0 for (int i=0; i Date: Mon, 17 Feb 2025 21:10:53 +0200 Subject: [PATCH 22/24] Fix: Escape dollar signs in DB_TAG assignment for consistency in Jenkins pipeline scripts --- cloud/jenkins/pso_eks.groovy | 2 +- cloud/jenkins/pso_gke.groovy | 2 +- cloud/jenkins/pso_minikube.groovy | 2 +- cloud/jenkins/pso_openshift.groovy | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/jenkins/pso_eks.groovy b/cloud/jenkins/pso_eks.groovy index 690de7302c..997a1e6543 
100644 --- a/cloud/jenkins/pso_eks.groovy +++ b/cloud/jenkins/pso_eks.groovy @@ -369,7 +369,7 @@ void shutdownCluster(String CLUSTER_SUFFIX) { pipeline { environment { - DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() + DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') diff --git a/cloud/jenkins/pso_gke.groovy b/cloud/jenkins/pso_gke.groovy index 0af767f0a9..e60467d0c6 100644 --- a/cloud/jenkins/pso_gke.groovy +++ b/cloud/jenkins/pso_gke.groovy @@ -354,7 +354,7 @@ void shutdownCluster(String CLUSTER_SUFFIX) { pipeline { environment { - DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() + DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') diff --git a/cloud/jenkins/pso_minikube.groovy b/cloud/jenkins/pso_minikube.groovy index f842f78fc3..acd9c18eaf 100644 --- a/cloud/jenkins/pso_minikube.groovy +++ b/cloud/jenkins/pso_minikube.groovy @@ -266,7 +266,7 @@ void makeReport() { pipeline { environment { - DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() + DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { choice(name: 'TEST_SUITE', choices: ['run-minikube.csv', 'run-distro.csv'], description: 'Choose 
test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') diff --git a/cloud/jenkins/pso_openshift.groovy b/cloud/jenkins/pso_openshift.groovy index 055e666712..246f36cf8e 100644 --- a/cloud/jenkins/pso_openshift.groovy +++ b/cloud/jenkins/pso_openshift.groovy @@ -362,7 +362,7 @@ void shutdownCluster(String CLUSTER_SUFFIX) { pipeline { environment { - DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() + DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim() } parameters { choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.') From 073cf65014bd6da47e1ac947be706ee1ef2b5d27 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 21:13:27 +0200 Subject: [PATCH 23/24] Fix: Add handling for MinIO secret file in Jenkins pipeline for E2E tests --- cloud/jenkins/pso_openshift.groovy | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cloud/jenkins/pso_openshift.groovy b/cloud/jenkins/pso_openshift.groovy index 246f36cf8e..9e3ef21a51 100644 --- a/cloud/jenkins/pso_openshift.groovy +++ b/cloud/jenkins/pso_openshift.groovy @@ -43,11 +43,9 @@ void prepareAgent() { void prepareSources() { echo "=========================[ Cloning the sources ]=========================" - git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines' sh """ git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source """ - GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() PARAMS_HASH = sh(script: "echo 
$GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim() CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim() @@ -156,9 +154,10 @@ void initTests() { } } - withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) { + withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) { sh """ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml + cp $CLOUD_MINIO_SECRET_FILE source/e2e-tests/conf/cloud-secret-minio-gw.yml """ } stash includes: "source/**", name: "sourceFILES" From f906e5b9221747588dbda028bb1e95147afe33e4 Mon Sep 17 00:00:00 2001 From: Pavel Tankov <4014969+ptankov@users.noreply.github.com> Date: Mon, 17 Feb 2025 21:13:44 +0200 Subject: [PATCH 24/24] Fix: Update git clone command to include .git suffix for repository URL --- cloud/jenkins/pso_openshift.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/jenkins/pso_openshift.groovy b/cloud/jenkins/pso_openshift.groovy index 9e3ef21a51..a3011cd84b 100644 --- a/cloud/jenkins/pso_openshift.groovy +++ b/cloud/jenkins/pso_openshift.groovy @@ -44,7 +44,7 @@ void prepareAgent() { void prepareSources() { echo "=========================[ Cloning the sources ]=========================" sh """ - git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source + git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator.git source """ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim() PARAMS_HASH = sh(script: "echo 
$GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim()