diff --git a/cloud/jenkins/ps-operator-aws-openshift-4.yml b/cloud/jenkins/ps-operator-aws-openshift-4.yml
deleted file mode 100644
index 120dd25833..0000000000
--- a/cloud/jenkins/ps-operator-aws-openshift-4.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-- job:
- name: ps-operator-aws-openshift-4
- project-type: pipeline
- description: |
- Do not edit this job through the web!
- pipeline-scm:
- scm:
- - git:
- url: https://github.com/Percona-Lab/jenkins-pipelines.git
- branches:
- - master
- wipe-workspace: false
- lightweight-checkout: true
- script-path: cloud/jenkins/ps_operator_aws_openshift-4.groovy
diff --git a/cloud/jenkins/ps-operator-latest-scheduler.groovy b/cloud/jenkins/ps-operator-latest-scheduler.groovy
deleted file mode 100644
index 76521edc2e..0000000000
--- a/cloud/jenkins/ps-operator-latest-scheduler.groovy
+++ /dev/null
@@ -1,119 +0,0 @@
-library changelog: false, identifier: 'lib@master', retriever: modernSCM([
- $class: 'GitSCMSource',
- remote: 'https://github.com/Percona-Lab/jenkins-pipelines.git'
-]) _
-
-
-pipeline {
- parameters {
- choice(
- choices: ['run-release.csv', 'run-distro.csv'],
- description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.',
- name: 'TEST_SUITE')
- text(
- defaultValue: '',
- description: 'List of tests to run separated by new line',
- name: 'TEST_LIST')
- choice(
- choices: 'NO\nYES',
- description: 'Ignore passed tests in previous run (run all)',
- name: 'IGNORE_PREVIOUS_RUN'
- )
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: 'latest',
- description: 'GKE version',
- name: 'PLATFORM_VER')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
- }
- agent {
- label 'docker'
- }
- options {
- skipDefaultCheckout()
- disableConcurrentBuilds()
- buildDiscarder(logRotator(numToKeepStr: '10', artifactNumToKeepStr: '10'))
- timestamps ()
- }
- triggers {
- cron('0 8 * * 0')
- }
- stages {
- stage("Run parallel") {
- parallel{
-
- stage('Trigger ps-operator-gke-latest job 3 times') {
- steps {
- script {
- for (int i = 1; i <= 3; i++) {
- build job: 'ps-operator-gke-latest', propagate: false, wait: true, parameters: [string(name: 'TEST_SUITE', value: "${TEST_SUITE}"),string(name: 'TEST_LIST', value: "${TEST_LIST}"),string(name: 'IGNORE_PREVIOUS_RUN', value: "${IGNORE_PREVIOUS_RUN}"),string(name: 'GIT_BRANCH', value: "${GIT_BRANCH}"),string(name: 'GIT_REPO', value: "${GIT_REPO}"),string(name: 'PLATFORM_VER', value: "${PLATFORM_VER}"),string(name: 'OPERATOR_IMAGE', value: "${OPERATOR_IMAGE}"),string(name: 'IMAGE_MYSQL', value: "${IMAGE_MYSQL}"),string(name: 'IMAGE_ORCHESTRATOR', value: "${IMAGE_ORCHESTRATOR}"),string(name: 'IMAGE_ROUTER', value: "${IMAGE_ROUTER}"),string(name: 'IMAGE_BACKUP', value: "${IMAGE_BACKUP}"),string(name: 'IMAGE_TOOLKIT', value: "${IMAGE_TOOLKIT}"),string(name: 'IMAGE_HAPROXY', value: "${IMAGE_HAPROXY}"),string(name: 'IMAGE_PMM_CLIENT', value: "${IMAGE_PMM_CLIENT}"),string(name: 'IMAGE_PMM_SERVER', value: "${IMAGE_PMM_CLIENT}")]
- }
- }
- }
- }
-
- stage('Trigger ps-operator-eks-latest job 3 times') {
- steps {
- script {
- for (int i = 1; i <= 3; i++) {
- build job: 'ps-operator-eks-latest', propagate: false, wait: true, parameters: [string(name: 'TEST_SUITE', value: "${TEST_SUITE}"),string(name: 'TEST_LIST', value: "${TEST_LIST}"),string(name: 'IGNORE_PREVIOUS_RUN', value: "${IGNORE_PREVIOUS_RUN}"),string(name: 'GIT_BRANCH', value: "${GIT_BRANCH}"),string(name: 'GIT_REPO', value: "${GIT_REPO}"),string(name: 'PLATFORM_VER', value: "${PLATFORM_VER}"),string(name: 'OPERATOR_IMAGE', value: "${OPERATOR_IMAGE}"),string(name: 'IMAGE_MYSQL', value: "${IMAGE_MYSQL}"),string(name: 'IMAGE_ORCHESTRATOR', value: "${IMAGE_ORCHESTRATOR}"),string(name: 'IMAGE_ROUTER', value: "${IMAGE_ROUTER}"),string(name: 'IMAGE_BACKUP', value: "${IMAGE_BACKUP}"),string(name: 'IMAGE_TOOLKIT', value: "${IMAGE_TOOLKIT}"),string(name: 'IMAGE_HAPROXY', value: "${IMAGE_HAPROXY}"),string(name: 'IMAGE_PMM_CLIENT', value: "${IMAGE_PMM_CLIENT}"),string(name: 'IMAGE_PMM_SERVER', value: "${IMAGE_PMM_CLIENT}")]
- }
- }
- }
- }
- }
- }
- }
- post {
- always {
-
- copyArtifacts(projectName: 'ps-operator-gke-latest', selector: lastCompleted(), target: 'ps-operator-gke-latest')
- copyArtifacts(projectName: 'ps-operator-eks-latest', selector: lastCompleted(), target: 'ps-operator-eks-latest')
- archiveArtifacts '*/*.xml'
- step([$class: 'JUnitResultArchiver', testResults: '*/*.xml', healthScaleFactor: 1.0])
-
- }
- }
-}
diff --git a/cloud/jenkins/ps_operator_aws_openshift-4.groovy b/cloud/jenkins/ps_operator_aws_openshift-4.groovy
deleted file mode 100644
index a23f82069e..0000000000
--- a/cloud/jenkins/ps_operator_aws_openshift-4.groovy
+++ /dev/null
@@ -1,325 +0,0 @@
-void pushArtifactFile(String FILE_NAME) {
- echo "Push $FILE_NAME file to S3!"
-
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- touch ${FILE_NAME}
- S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source rev-parse --short HEAD)
- aws s3 ls \$S3_PATH/${FILE_NAME} || :
- aws s3 cp --quiet ${FILE_NAME} \$S3_PATH/${FILE_NAME} || :
- """
- }
-}
-
-void popArtifactFile(String FILE_NAME) {
- echo "Try to get $FILE_NAME file from S3!"
-
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/\$(git -C source rev-parse --short HEAD)
- aws s3 cp --quiet \$S3_PATH/${FILE_NAME} ${FILE_NAME} || :
- """
- }
-}
-
-TestsReport = '\n'
-testsReportMap = [:]
-void makeReport() {
- for ( test in testsReportMap ) {
- TestsReport = TestsReport + "<${test.value}/>\n"
- }
- TestsReport = TestsReport + '\n'
-}
-
-void runTest(String TEST_NAME) {
- def retryCount = 0
- waitUntil {
- try {
- echo "The $TEST_NAME test was started!"
- testsReportMap[TEST_NAME] = 'failure'
-
- def FILE_NAME = "$GIT_BRANCH-$GIT_SHORT_COMMIT-$TEST_NAME-eks-$PLATFORM_VER-$PARAMS_HASH"
- popArtifactFile("$FILE_NAME")
-
- timeout(time: 90, unit: 'MINUTES') {
- sh """
- if [ -f "$FILE_NAME" ]; then
- echo "Skipping $TEST_NAME test because it passed in previous run."
- else
- cd source
-
- [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- export IMAGE_MYSQL=$IMAGE_MYSQL
- export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
- export IMAGE_ROUTER=$IMAGE_ROUTER
- export IMAGE_HAPROXY=$IMAGE_HAPROXY
- export IMAGE_BACKUP=$IMAGE_BACKUP
- export IMAGE_TOOLKIT=$IMAGE_TOOLKIT
- export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
- export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
-
- export PATH="${HOME}/.krew/bin:$PATH"
- source $HOME/google-cloud-sdk/path.bash.inc
- export KUBECONFIG=$WORKSPACE/openshift/auth/kubeconfig
- oc whoami
-
- kubectl kuttl test --config ./e2e-tests/kuttl.yaml --test "^${TEST_NAME}\$"
- fi
- """
- }
- pushArtifactFile("$FILE_NAME")
- testsReportMap[TEST_NAME] = 'passed'
- return true
- }
- catch (exc) {
- if (retryCount >= 2) {
- currentBuild.result = 'FAILURE'
- return true
- }
- retryCount++
- return false
- }
- }
-
- echo "The $TEST_NAME test was finished!"
-}
-
-void conditionalRunTest(String TEST_NAME) {
- if ( TEST_NAME == 'default-cr' ) {
- if ( params.GIT_BRANCH.contains('release-') ) {
- runTest(TEST_NAME)
- }
- return 0
- }
- runTest(TEST_NAME)
-}
-
-void installRpms() {
- sh """
- sudo yum install -y https://repo.percona.com/yum/percona-release-latest.noarch.rpm || true
- sudo percona-release enable-only tools
- """
-}
-pipeline {
- parameters {
- string(
- defaultValue: '4.7.22',
- description: 'OpenShift version to use',
- name: 'PLATFORM_VER')
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
- }
- environment {
- TF_IN_AUTOMATION = 'true'
- CLEAN_NAMESPACE = 1
- }
- agent {
- label 'docker'
- }
- options {
- buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
- skipDefaultCheckout()
- disableConcurrentBuilds()
- }
-
- stages {
- stage('Prepare') {
- steps {
- sh """
- wget https://releases.hashicorp.com/terraform/0.11.14/terraform_0.11.14_linux_amd64.zip
- unzip -o terraform_0.11.14_linux_amd64.zip
- sudo mv terraform /usr/local/bin/ && rm terraform_0.11.14_linux_amd64.zip
- """
- installRpms()
- sh '''
- if [ ! -d $HOME/google-cloud-sdk/bin ]; then
- rm -rf $HOME/google-cloud-sdk
- curl https://sdk.cloud.google.com | bash
- fi
-
- source $HOME/google-cloud-sdk/path.bash.inc
- gcloud components update kubectl
- gcloud version
-
- curl -s https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz \
- | sudo tar -C /usr/local/bin --strip-components 1 -zvxpf -
-
- sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/v4.34.1/yq_linux_amd64 > /usr/local/bin/yq"
- sudo chmod +x /usr/local/bin/yq
- sudo sh -c "curl -s -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 > /usr/local/bin/jq"
- sudo chmod +x /usr/local/bin/jq
-
- curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$PLATFORM_VER/openshift-client-linux-$PLATFORM_VER.tar.gz \
- | sudo tar -C /usr/local/bin --wildcards -zxvpf -
- curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$PLATFORM_VER/openshift-install-linux-$PLATFORM_VER.tar.gz \
- | sudo tar -C /usr/local/bin --wildcards -zxvpf -
-
- cd "$(mktemp -d)"
- OS="$(uname | tr '[:upper:]' '[:lower:]')"
- ARCH="$(uname -m | sed -e 's/x86_64/amd64/')"
- KREW="krew-${OS}_${ARCH}"
- curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/download/v0.4.2/${KREW}.tar.gz"
- tar zxvf "${KREW}.tar.gz"
- ./"${KREW}" install krew
-
- export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"
-
- kubectl krew install kuttl
- kubectl krew install assert
- '''
-
- }
- }
- stage('Build docker image') {
- steps {
- git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
- withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER'), file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) {
- sh '''
- sudo git config --global --add safe.directory '*'
- sudo git reset --hard
- sudo git clean -xdf
- sudo rm -rf source
- ./cloud/local/checkout $GIT_REPO $GIT_BRANCH
-
- cp $CLOUD_SECRET_FILE ./source/e2e-tests/conf/cloud-secret.yml
-
- if [[ "$OPERATOR_IMAGE" ]]; then
- echo "SKIP: Build is not needed, operator image was set!"
- else
- cd ./source/
- sg docker -c "
- docker login -u '${USER}' -p '${PASS}'
- export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- ./e2e-tests/build
- docker logout
- "
- sudo rm -rf ./build
- fi
- '''
- }
- }
- }
- stage('Create AWS Infrastructure') {
- steps {
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift4-secret-file', variable: 'OPENSHIFT_CONF_FILE')]) {
- sh """
- mkdir openshift
- cp $OPENSHIFT_CONF_FILE ./openshift/install-config.yaml
- sed -i 's/pxc/ps/g' ./openshift/install-config.yaml
- """
- sshagent(['aws-openshift-41-key']) {
- sh """
- /usr/local/bin/openshift-install create cluster --dir=./openshift/
- """
- }
- }
-
- }
- }
- stage('E2E Basic Tests') {
- environment {
- GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim()
- PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim()
- }
- options {
- timeout(time: 3, unit: 'HOURS')
- }
- steps {
- runTest('async-ignore-annotations')
- runTest('auto-config')
- runTest('config')
- runTest('config-router')
- runTest('demand-backup')
- runTest('gr-demand-backup')
- runTest('gr-ignore-annotations')
- runTest('gr-init-deploy')
- runTest('gr-one-pod')
- runTest('gr-scaling')
- runTest('gr-tls-cert-manager')
- runTest('haproxy')
- runTest('init-deploy')
- runTest('limits')
- runTest('monitoring')
- runTest('one-pod')
- runTest('scaling')
- runTest('semi-sync')
- runTest('service-per-pod')
- runTest('sidecars')
- runTest('tls-cert-manager')
- runTest('users')
- runTest('version-service')
- }
- }
- stage('Make report') {
- steps {
- makeReport()
- sh """
- echo "${TestsReport}" > TestsReport.xml
- """
- step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
- archiveArtifacts '*.xml'
- }
- }
- }
-
- post {
- always {
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT-CONF-FILE')]) {
- sshagent(['aws-openshift-41-key']) {
- sh """
- /usr/local/bin/openshift-install destroy cluster --dir=./openshift/
- """
- }
- }
-
- sh '''
- sudo docker rmi -f \$(sudo docker images -q) || true
- sudo rm -rf $HOME/google-cloud-sdk
- sudo rm -rf ./*
- '''
- deleteDir()
- }
- }
-}
diff --git a/cloud/jenkins/ps_operator_eks_version.groovy b/cloud/jenkins/ps_operator_eks_version.groovy
deleted file mode 100644
index 86cd5de840..0000000000
--- a/cloud/jenkins/ps_operator_eks_version.groovy
+++ /dev/null
@@ -1,514 +0,0 @@
-region='eu-west-2'
-tests=[]
-clusters=[]
-
-void prepareNode() {
- echo "=========================[ Installing tools on the Jenkins executor ]========================="
- sh """
- sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
- kubectl version --client --output=yaml
-
- curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm
-
- sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq
- sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq
-
- curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf -
- ./krew-linux_amd64 install krew
- export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
-
- kubectl krew install assert
-
- # v0.17.0 kuttl version
- kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml
- echo \$(kubectl kuttl --version) is installed
-
- curl -sL https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_\$(uname -s)_amd64.tar.gz | sudo tar -C /usr/local/bin -xzf - && sudo chmod +x /usr/local/bin/eksctl
- """
-}
-
-void prepareSources() {
- if ("$PLATFORM_VER" == "latest") {
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- USED_PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim()
- }
- } else {
- USED_PLATFORM_VER="$PLATFORM_VER"
- }
- echo "USED_PLATFORM_VER=$USED_PLATFORM_VER"
-
- echo "=========================[ Cloning the sources ]========================="
- git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
- sh """
- # sudo is needed for better node recovery after compilation failure
- # if building failed on compilation stage directory will have files owned by docker user
- sudo git config --global --add safe.directory '*'
- sudo git reset --hard
- sudo git clean -xdf
- sudo rm -rf source
- cloud/local/checkout $GIT_REPO $GIT_BRANCH
- """
-
- script {
- GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim()
- CLUSTER_NAME = sh(script: "echo jenkins-ver-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim()
- PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim()
- }
-}
-
-void dockerBuildPush() {
- echo "=========================[ Building and Pushing the operator Docker image ]========================="
- withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
- sh """
- if [[ "$OPERATOR_IMAGE" ]]; then
- echo "SKIP: Build is not needed, operator image was set!"
- else
- cd source
- sg docker -c "
- docker login -u '$USER' -p '$PASS'
- export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- e2e-tests/build
- docker logout
- "
- sudo rm -rf build
- fi
- """
- }
-}
-
-void initTests() {
- echo "=========================[ Initializing the tests ]========================="
-
- echo "Populating tests into the tests array!"
- def testList = "$TEST_LIST"
- def suiteFileName = "source/e2e-tests/$TEST_SUITE"
-
- if (testList.length() != 0) {
- suiteFileName = 'source/e2e-tests/run-custom.csv'
- sh """
- echo -e "$testList" > $suiteFileName
- echo "Custom test suite contains following tests:"
- cat $suiteFileName
- """
- }
-
- def records = readCSV file: suiteFileName
-
- for (int i=0; i/dev/null 2>&1", returnStatus: true)
-
- if (retFileExists == 0) {
- tests[i]["result"] = "passed"
- }
- }
- } else {
- sh """
- aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || :
- """
- }
- }
-
- withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) {
- sh """
- cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml
- chmod 600 source/e2e-tests/conf/cloud-secret.yml
- """
- }
- stash includes: "source/**", name: "sourceFILES"
-}
-
-void clusterRunner(String cluster) {
- def clusterCreated=0
-
- for (int i=0; i= 1) {
- shutdownCluster(cluster)
- }
-}
-
-void createCluster(String CLUSTER_SUFFIX) {
- clusters.add("$CLUSTER_SUFFIX")
-
- sh """
- timestamp="\$(date +%s)"
-tee cluster-${CLUSTER_SUFFIX}.yaml << EOF
-# An example of ClusterConfig showing nodegroups with mixed instances (spot and on demand):
----
-apiVersion: eksctl.io/v1alpha5
-kind: ClusterConfig
-
-metadata:
- name: $CLUSTER_NAME-$CLUSTER_SUFFIX
- region: $region
- version: "$USED_PLATFORM_VER"
- tags:
- 'delete-cluster-after-hours': '10'
- 'creation-time': '\$timestamp'
- 'team': 'cloud'
-iam:
- withOIDC: true
-
-addons:
-- name: aws-ebs-csi-driver
- wellKnownPolicies:
- ebsCSIController: true
-
-nodeGroups:
- - name: ng-1
- minSize: 3
- maxSize: 5
- desiredCapacity: 3
- instanceType: "m5.xlarge"
- iam:
- attachPolicyARNs:
- - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
- - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
- - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
- - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- - arn:aws:iam::aws:policy/AmazonS3FullAccess
- tags:
- 'iit-billing-tag': 'jenkins-eks'
- 'delete-cluster-after-hours': '10'
- 'team': 'cloud'
- 'product': 'ps-operator'
-EOF
- """
-
- // this is needed for always post action because pipeline runs earch parallel step on another instance
- stash includes: "cluster-${CLUSTER_SUFFIX}.yaml", name: "cluster-$CLUSTER_SUFFIX-config"
-
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
- export PATH=/home/ec2-user/.local/bin:\$PATH
- eksctl create cluster -f cluster-${CLUSTER_SUFFIX}.yaml
- kubectl annotate storageclass gp2 storageclass.kubernetes.io/is-default-class=true
- kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user="\$(aws sts get-caller-identity|jq -r '.Arn')"
- """
- }
-}
-
-void runTest(Integer TEST_ID) {
- def retryCount = 0
- def testName = tests[TEST_ID]["name"]
- def clusterSuffix = tests[TEST_ID]["cluster"]
-
- waitUntil {
- def timeStart = new Date().getTime()
- try {
- echo "The $testName test was started on cluster $CLUSTER_NAME-$clusterSuffix !"
- tests[TEST_ID]["result"] = "failure"
-
- timeout(time: 90, unit: 'MINUTES') {
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd'], file(credentialsId: 'eks-conf-file', variable: 'EKS_CONF_FILE')]) {
- sh """
- cd source
-
- [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator
- [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- export IMAGE_MYSQL=$IMAGE_MYSQL
- export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
- export IMAGE_ROUTER=$IMAGE_ROUTER
- export IMAGE_HAPROXY=$IMAGE_HAPROXY
- export IMAGE_BACKUP=$IMAGE_BACKUP
- export IMAGE_TOOLKIT=$IMAGE_TOOLKIT
- export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
- export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
- export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix
- export PATH=\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH
- export PATH=/home/ec2-user/.local/bin:\$PATH
-
- kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$"
- """
- }
- }
- pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
- tests[TEST_ID]["result"] = "passed"
- return true
- }
- catch (exc) {
- if (retryCount >= 1) {
- currentBuild.result = 'FAILURE'
- return true
- }
- retryCount++
- return false
- }
- finally {
- def timeStop = new Date().getTime()
- def durationSec = (timeStop - timeStart) / 1000
- tests[TEST_ID]["time"] = durationSec
- echo "The $testName test was finished!"
- }
- }
-}
-
-void pushArtifactFile(String FILE_NAME) {
- echo "Push $FILE_NAME file to S3!"
-
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- touch $FILE_NAME
- S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT
- aws s3 ls \$S3_PATH/$FILE_NAME || :
- aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || :
- """
- }
-}
-
-TestsReport = '\n'
-void makeReport() {
- echo "=========================[ Generating Test Report ]========================="
- for (int i=0; i<'+ testResult +'/>\n'
- }
- TestsReport = TestsReport + '\n'
-}
-
-void shutdownCluster(String CLUSTER_SUFFIX) {
- unstash "cluster-$CLUSTER_SUFFIX-config"
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
- eksctl delete addon --name aws-ebs-csi-driver --cluster $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region || true
- for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do
- kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true
- kubectl delete sts --all -n \$namespace --force --grace-period=0 || true
- kubectl delete replicasets --all -n \$namespace --force --grace-period=0 || true
- kubectl delete poddisruptionbudget --all -n \$namespace --force --grace-period=0 || true
- kubectl delete services --all -n \$namespace --force --grace-period=0 || true
- kubectl delete pods --all -n \$namespace --force --grace-period=0 || true
- done
- kubectl get svc --all-namespaces || true
-
- VPC_ID=\$(eksctl get cluster --name $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region -ojson | jq --raw-output '.[0].ResourcesVpcConfig.VpcId' || true)
- if [ -n "\$VPC_ID" ]; then
- LOADBALS=\$(aws elb describe-load-balancers --region $region --output json | jq --raw-output '.LoadBalancerDescriptions[] | select(.VPCId == "'\$VPC_ID'").LoadBalancerName')
- for loadbal in \$LOADBALS; do
- aws elb delete-load-balancer --load-balancer-name \$loadbal --region $region
- done
- eksctl delete cluster -f cluster-${CLUSTER_SUFFIX}.yaml --wait --force --disable-nodegroup-eviction || true
-
- VPC_DESC=\$(aws ec2 describe-vpcs --vpc-id \$VPC_ID --region $region || true)
- if [ -n "\$VPC_DESC" ]; then
- aws ec2 delete-vpc --vpc-id \$VPC_ID --region $region || true
- fi
- VPC_DESC=\$(aws ec2 describe-vpcs --vpc-id \$VPC_ID --region $region || true)
- if [ -n "\$VPC_DESC" ]; then
- for secgroup in \$(aws ec2 describe-security-groups --filters Name=vpc-id,Values=\$VPC_ID --query 'SecurityGroups[*].GroupId' --output text --region $region); do
- aws ec2 delete-security-group --group-id \$secgroup --region $region || true
- done
-
- aws ec2 delete-vpc --vpc-id \$VPC_ID --region $region || true
- fi
- fi
- aws cloudformation delete-stack --stack-name eksctl-$CLUSTER_NAME-$CLUSTER_SUFFIX-cluster --region $region || true
- aws cloudformation wait stack-delete-complete --stack-name eksctl-$CLUSTER_NAME-$CLUSTER_SUFFIX-cluster --region $region || true
-
- eksctl get cluster --name $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region || true
- aws cloudformation list-stacks --region $region | jq '.StackSummaries[] | select(.StackName | startswith("'eksctl-$CLUSTER_NAME-$CLUSTER_SUFFIX-cluster'"))' || true
- """
- }
-}
-
-pipeline {
- environment {
- CLOUDSDK_CORE_DISABLE_PROMPTS = 1
- PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim()
- }
- parameters {
- choice(
- choices: ['run-release.csv', 'run-distro.csv'],
- description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.',
- name: 'TEST_SUITE')
- text(
- defaultValue: '',
- description: 'List of tests to run separated by new line',
- name: 'TEST_LIST')
- choice(
- choices: 'NO\nYES',
- description: 'Ignore passed tests in previous run (run all)',
- name: 'IGNORE_PREVIOUS_RUN'
- )
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: 'latest',
- description: 'EKS kubernetes version',
- name: 'PLATFORM_VER')
- choice(
- choices: 'YES\nNO',
- description: 'Run tests in cluster wide mode',
- name: 'CLUSTER_WIDE')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
- }
- agent {
- label 'docker'
- }
- options {
- buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
- skipDefaultCheckout()
- disableConcurrentBuilds()
- copyArtifactPermission('ps-operator-latest-scheduler');
- }
- stages {
- stage('Prepare node') {
- steps {
- prepareNode()
- prepareSources()
- }
- }
- stage('Docker Build and Push') {
- steps {
- dockerBuildPush()
- }
- }
- stage('Init tests') {
- steps {
- initTests()
- }
- }
- stage('Run Tests') {
- options {
- timeout(time: 3, unit: 'HOURS')
- }
- parallel {
- stage('cluster1') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster1')
- }
- }
- stage('cluster2') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster2')
- }
- }
- stage('cluster3') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster3')
- }
- }
- stage('cluster4') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster4')
- }
- }
- }
-
- }
- }
- post {
- always {
- echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
- makeReport()
- sh """
- echo "$TestsReport" > TestsReport.xml
- """
- step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
- archiveArtifacts '*.xml'
-
- script {
- if (currentBuild.result != null && currentBuild.result != 'SUCCESS') {
- slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL"
- }
-
- clusters.each { shutdownCluster(it) }
- }
-
- sh """
- sudo docker system prune --volumes -af
- sudo rm -rf *
- """
- deleteDir()
- }
- }
-}
diff --git a/cloud/jenkins/ps_operator_gke_latest.groovy b/cloud/jenkins/ps_operator_gke_latest.groovy
deleted file mode 100644
index 7722188f3b..0000000000
--- a/cloud/jenkins/ps_operator_gke_latest.groovy
+++ /dev/null
@@ -1,478 +0,0 @@
-region='us-central1-a'
-tests=[]
-clusters=[]
-
-void prepareNode() {
- echo "=========================[ Installing tools on the Jenkins executor ]========================="
- sh """
- sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
- kubectl version --client --output=yaml
-
- curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm
-
- sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq
- sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq
-
- curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf -
- ./krew-linux_amd64 install krew
- export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
-
- kubectl krew install assert
-
- # v0.17.0 kuttl version
- kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml
- echo \$(kubectl kuttl --version) is installed
-
- sudo tee /etc/yum.repos.d/google-cloud-sdk.repo << EOF
-[google-cloud-cli]
-name=Google Cloud CLI
-baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64
-enabled=1
-gpgcheck=1
-repo_gpgcheck=0
-gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-EOF
- sudo yum install -y google-cloud-cli google-cloud-cli-gke-gcloud-auth-plugin
- """
-
- echo "=========================[ Logging in the Kubernetes provider ]========================="
- withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-alpha-key-file', variable: 'CLIENT_SECRET_FILE')]) {
- sh """
- gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE
- gcloud config set project $GCP_PROJECT
- """
- }
-}
-
-void prepareSources() {
- if ("$PLATFORM_VER" == "latest") {
- USED_PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", , returnStdout: true).trim()
- } else {
- USED_PLATFORM_VER="$PLATFORM_VER"
- }
- echo "USED_PLATFORM_VER=$USED_PLATFORM_VER"
-
- echo "=========================[ Cloning the sources ]========================="
- git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
- sh """
- # sudo is needed for better node recovery after compilation failure
- # if building failed on compilation stage directory will have files owned by docker user
- sudo git config --global --add safe.directory '*'
- sudo git reset --hard
- sudo git clean -xdf
- sudo rm -rf source
- cloud/local/checkout $GIT_REPO $GIT_BRANCH
- """
-
- script {
- GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim()
- CLUSTER_NAME = sh(script: "echo jenkins-lat-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim()
- PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim()
- }
-}
-
-void dockerBuildPush() {
- echo "=========================[ Building and Pushing the operator Docker image ]========================="
- withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
- sh """
- if [[ "$OPERATOR_IMAGE" ]]; then
- echo "SKIP: Build is not needed, operator image was set!"
- else
- cd source
- sg docker -c "
- docker login -u '$USER' -p '$PASS'
- export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- e2e-tests/build
- docker logout
- "
- sudo rm -rf build
- fi
- """
- }
-}
-
-void initTests() {
- echo "=========================[ Initializing the tests ]========================="
-
- echo "Populating tests into the tests array!"
- def testList = "$TEST_LIST"
- def suiteFileName = "source/e2e-tests/$TEST_SUITE"
-
- if (testList.length() != 0) {
- suiteFileName = 'source/e2e-tests/run-custom.csv'
- sh """
- echo -e "$testList" > $suiteFileName
- echo "Custom test suite contains following tests:"
- cat $suiteFileName
- """
- }
-
- def records = readCSV file: suiteFileName
-
- for (int i=0; i/dev/null 2>&1", returnStatus: true)
-
- if (retFileExists == 0) {
- tests[i]["result"] = "passed"
- }
- }
- } else {
- sh """
- aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || :
- """
- }
- }
-
- withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) {
- sh """
- cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml
- """
- }
- stash includes: "source/**", name: "sourceFILES"
-}
-
-void clusterRunner(String cluster) {
- def clusterCreated=0
-
- for (int i=0; i= 1) {
- shutdownCluster(cluster)
- }
-}
-
-void createCluster(String CLUSTER_SUFFIX) {
- clusters.add("$CLUSTER_SUFFIX")
-
- withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) {
- sh """
- export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
-
- maxRetries=15
- exitCode=1
- while [[ \$exitCode != 0 && \$maxRetries > 0 ]]; do
- ret_val=0
- gcloud container clusters create $CLUSTER_NAME-$CLUSTER_SUFFIX \
- --zone $region \
- --cluster-version $USED_PLATFORM_VER \
- --machine-type n1-standard-4 \
- --preemptible \
- --disk-size 30 \
- --num-nodes=3 \
- --network=jenkins-ps-vpc \
- --subnetwork=jenkins-ps-$CLUSTER_SUFFIX \
- --no-enable-autoupgrade \
- --cluster-ipv4-cidr=/21 \
- --labels delete-cluster-after-hours=6 &&\
- kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user jenkins@"$GCP_PROJECT".iam.gserviceaccount.com
- exitCode=\$?
- if [[ \$exitCode == 0 ]]; then break; fi
- (( maxRetries -- ))
- sleep 1
- done
- if [[ \$exitCode != 0 ]]; then exit \$exitCode; fi
- """
- }
-}
-
-void runTest(Integer TEST_ID) {
- def retryCount = 0
- def testName = tests[TEST_ID]["name"]
- def clusterSuffix = tests[TEST_ID]["cluster"]
-
- waitUntil {
- def timeStart = new Date().getTime()
- try {
- echo "The $testName test was started on cluster $CLUSTER_NAME-$clusterSuffix !"
- tests[TEST_ID]["result"] = "failure"
-
- timeout(time: 90, unit: 'MINUTES') {
- sh """
- cd source
-
- [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator
- [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- export IMAGE_MYSQL=$IMAGE_MYSQL
- export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
- export IMAGE_ROUTER=$IMAGE_ROUTER
- export IMAGE_HAPROXY=$IMAGE_HAPROXY
- export IMAGE_BACKUP=$IMAGE_BACKUP
- export IMAGE_TOOLKIT=$IMAGE_TOOLKIT
- export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
- export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
- export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix
- export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
-
- kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$"
- """
- }
- pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
- tests[TEST_ID]["result"] = "passed"
- return true
- }
- catch (exc) {
- if (retryCount >= 1) {
- currentBuild.result = 'FAILURE'
- return true
- }
- retryCount++
- return false
- }
- finally {
- def timeStop = new Date().getTime()
- def durationSec = (timeStop - timeStart) / 1000
- tests[TEST_ID]["time"] = durationSec
- echo "The $testName test was finished!"
- }
- }
-}
-
-void pushArtifactFile(String FILE_NAME) {
- echo "Push $FILE_NAME file to S3!"
-
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- touch $FILE_NAME
- S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT
- aws s3 ls \$S3_PATH/$FILE_NAME || :
- aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || :
- """
- }
-}
-
-TestsReport = '\n'
-void makeReport() {
- echo "=========================[ Generating Test Report ]========================="
- for (int i=0; i<'+ testResult +'/>\n'
- }
- TestsReport = TestsReport + '\n'
-}
-
-void shutdownCluster(String CLUSTER_SUFFIX) {
- withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) {
- sh """
- export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
- for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do
- kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true
- kubectl delete sts --all -n \$namespace --force --grace-period=0 || true
- kubectl delete replicasets --all -n \$namespace --force --grace-period=0 || true
- kubectl delete poddisruptionbudget --all -n \$namespace --force --grace-period=0 || true
- kubectl delete services --all -n \$namespace --force --grace-period=0 || true
- kubectl delete pods --all -n \$namespace --force --grace-period=0 || true
- done
- kubectl get svc --all-namespaces || true
- gcloud container clusters delete --zone $region $CLUSTER_NAME-$CLUSTER_SUFFIX --quiet || true
- """
- }
-}
-
-pipeline {
- environment {
- CLOUDSDK_CORE_DISABLE_PROMPTS = 1
- PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim()
- }
- parameters {
- choice(
- choices: ['run-release.csv', 'run-distro.csv'],
- description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.',
- name: 'TEST_SUITE')
- text(
- defaultValue: '',
- description: 'List of tests to run separated by new line',
- name: 'TEST_LIST')
- choice(
- choices: 'NO\nYES',
- description: 'Ignore passed tests in previous run (run all)',
- name: 'IGNORE_PREVIOUS_RUN'
- )
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: 'latest',
- description: 'GKE version',
- name: 'PLATFORM_VER')
- choice(
- choices: 'YES\nNO',
- description: 'Run tests in cluster wide mode',
- name: 'CLUSTER_WIDE')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
- }
- agent {
- label 'docker'
- }
- options {
- buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
- skipDefaultCheckout()
- disableConcurrentBuilds()
- copyArtifactPermission('ps-operator-latest-scheduler');
- }
- stages {
- stage('Prepare node') {
- steps {
- prepareNode()
- prepareSources()
- }
- }
- stage('Docker Build and Push') {
- steps {
- dockerBuildPush()
- }
- }
- stage('Init tests') {
- steps {
- initTests()
- }
- }
- stage('Run Tests') {
- options {
- timeout(time: 3, unit: 'HOURS')
- }
- parallel {
- stage('cluster1') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster1')
- }
- }
- stage('cluster2') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster2')
- }
- }
- stage('cluster3') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster3')
- }
- }
- stage('cluster4') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster4')
- }
- }
- stage('cluster5') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster5')
- }
- }
- }
- }
- }
- post {
- always {
- echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
- makeReport()
- sh """
- echo "$TestsReport" > TestsReport.xml
- """
- step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
- archiveArtifacts '*.xml'
-
- script {
- if (currentBuild.result != null && currentBuild.result != 'SUCCESS') {
- slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL"
- }
-
- clusters.each { shutdownCluster(it) }
- }
-
- sh """
- sudo docker system prune --volumes -af
- sudo rm -rf *
- """
- deleteDir()
- }
- }
-}
diff --git a/cloud/jenkins/ps_operator_minikube.groovy b/cloud/jenkins/ps_operator_minikube.groovy
deleted file mode 100644
index ad4aa2d704..0000000000
--- a/cloud/jenkins/ps_operator_minikube.groovy
+++ /dev/null
@@ -1,345 +0,0 @@
-tests=[]
-
-void checkoutSources() {
- if ("$IMAGE_MYSQL") {
- currentBuild.description = "$GIT_BRANCH-$PLATFORM_VER-CW_$CLUSTER_WIDE-" + "$IMAGE_MYSQL".split(":")[1]
- }
-
- echo "USED_PLATFORM_VER=$PLATFORM_VER"
-
- echo "=========================[ Cloning the sources ]========================="
- git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
- sh """
- # sudo is needed for better node recovery after compilation failure
- # if building failed on compilation stage directory will have files owned by docker user
- sudo git config --global --add safe.directory '*'
- sudo git reset --hard
- sudo git clean -xdf
- sudo rm -rf source
- cloud/local/checkout $GIT_REPO $GIT_BRANCH
- """
-
- GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim()
- PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim()
-}
-
-void dockerBuildPush() {
- echo "=========================[ Building and Pushing the operator Docker image ]========================="
- withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
- sh """
- if [[ "$OPERATOR_IMAGE" ]]; then
- echo "SKIP: Build is not needed, operator image was set!"
- else
- cd source
- sg docker -c "
- docker buildx create --use
- docker login -u '$USER' -p '$PASS'
- export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
- e2e-tests/build
- docker logout
- "
- sudo rm -rf build
- fi
- """
- }
-}
-
-void initTests() {
- echo "=========================[ Initializing the tests ]========================="
-
- echo "Populating tests into the tests array!"
- def testList = "$TEST_LIST"
- def suiteFileName = "source/e2e-tests/$TEST_SUITE"
-
- if (testList.length() != 0) {
- suiteFileName = 'source/e2e-tests/run-custom.csv'
- sh """
- echo -e "$testList" > $suiteFileName
- echo "Custom test suite contains following tests:"
- cat $suiteFileName
- """
- }
-
- def records = readCSV file: suiteFileName
-
- for (int i=0; i/dev/null 2>&1", returnStatus: true)
-
- if (retFileExists == 0) {
- tests[i]["result"] = "passed"
- }
- }
- } else {
- sh """
- aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || :
- """
- }
- }
-
- withCredentials([file(credentialsId: 'cloud-secret-file', variable: 'CLOUD_SECRET_FILE')]) {
- sh """
- cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml
- """
- }
-}
-
-void installToolsOnNode() {
- echo "=========================[ Installing tools on the Jenkins executor ]========================="
- sh """
- sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
- kubectl version --client --output=yaml
-
- curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm
-
- sudo sh -c "curl -s -L https://github.com/mikefarah/yq/releases/download/v4.35.1/yq_linux_amd64 > /usr/local/bin/yq"
- sudo chmod +x /usr/local/bin/yq
-
- sudo sh -c "curl -s -L https://github.com/jqlang/jq/releases/download/jq-1.6/jq-linux64 > /usr/local/bin/jq"
- sudo chmod +x /usr/local/bin/jq
-
- sudo curl -sLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x /usr/local/bin/minikube
-
- curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf -
- ./krew-linux_amd64 install krew
- export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
-
- kubectl krew install assert
-
- # v0.17.0 kuttl version
- kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml
- echo \$(kubectl kuttl --version) is installed
- """
-}
-
-void clusterRunner(String cluster) {
- sh """
- export CHANGE_MINIKUBE_NONE_USER=true
- /usr/local/bin/minikube start --kubernetes-version $PLATFORM_VER --cpus=6 --memory=28G
- """
-
- for (int i=0; i= 1) {
- currentBuild.result = 'FAILURE'
- return true
- }
- retryCount++
- return false
- }
- finally {
- def timeStop = new Date().getTime()
- def durationSec = (timeStop - timeStart) / 1000
- tests[TEST_ID]["time"] = durationSec
- echo "The $testName test was finished!"
- }
- }
-}
-
-void pushArtifactFile(String FILE_NAME) {
- echo "Push $FILE_NAME file to S3!"
-
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- sh """
- touch $FILE_NAME
- S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT
- aws s3 ls \$S3_PATH/$FILE_NAME || :
- aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || :
- """
- }
-}
-
-TestsReport = '\n'
-void makeReport() {
- echo "=========================[ Generating Test Report ]========================="
- for (int i=0; i<'+ testResult +'/>\n'
- }
- TestsReport = TestsReport + '\n'
-}
-
-
-pipeline {
- environment {
- CLEAN_NAMESPACE = 1
- DB_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim()
- }
-
- parameters {
- choice(
- choices: ['run-minikube.csv', 'run-distro.csv'],
- description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.',
- name: 'TEST_SUITE')
- text(
- defaultValue: '',
- description: 'List of tests to run separated by new line',
- name: 'TEST_LIST')
- choice(
- choices: 'NO\nYES',
- description: 'Ignore passed tests in previous run (run all)',
- name: 'IGNORE_PREVIOUS_RUN'
- )
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona/percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: 'latest',
- description: 'Minikube Kubernetes Version',
- name: 'PLATFORM_VER',
- trim: true)
- choice(
- choices: 'YES\nNO',
- description: 'Run tests in cluster wide mode',
- name: 'CLUSTER_WIDE')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
- }
-
- agent {
- label 'docker-32gb'
- }
-
- options {
- buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
- skipDefaultCheckout()
- }
-
- stages {
- stage('Checkout sources') {
- steps {
- checkoutSources()
- }
- }
- stage('Docker Build and Push') {
- steps {
- dockerBuildPush()
- }
- }
- stage('Init tests') {
- steps {
- initTests()
- }
- }
- stage('Run Tests') {
- options {
- timeout(time: 3, unit: 'HOURS')
- }
- steps {
- installToolsOnNode()
- clusterRunner('cluster1')
- }
- }
- }
-
- post {
- always {
- echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
- makeReport()
- sh """
- echo "$TestsReport" > TestsReport.xml
- """
- step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
- archiveArtifacts '*.xml'
- sh """
- /usr/local/bin/minikube delete || true
- """
- deleteDir()
- }
- }
-}
\ No newline at end of file
diff --git a/cloud/jenkins/ps-operator-eks-latest.yml b/cloud/jenkins/pso-eks-2.yml
similarity index 77%
rename from cloud/jenkins/ps-operator-eks-latest.yml
rename to cloud/jenkins/pso-eks-2.yml
index 6533d7cafc..aa85b5b5a4 100644
--- a/cloud/jenkins/ps-operator-eks-latest.yml
+++ b/cloud/jenkins/pso-eks-2.yml
@@ -1,5 +1,5 @@
- job:
- name: ps-operator-eks-latest
+ name: pso-eks-2
project-type: pipeline
description: |
Do not edit this job through the web!
@@ -11,4 +11,4 @@
- master
wipe-workspace: false
lightweight-checkout: true
- script-path: cloud/jenkins/ps_operator_eks_latest.groovy
+ script-path: cloud/jenkins/pso_eks.groovy
diff --git a/cloud/jenkins/ps-operator-eks-version.yml b/cloud/jenkins/pso-eks.yml
similarity index 76%
rename from cloud/jenkins/ps-operator-eks-version.yml
rename to cloud/jenkins/pso-eks.yml
index 85d2f0661d..66dbeea465 100644
--- a/cloud/jenkins/ps-operator-eks-version.yml
+++ b/cloud/jenkins/pso-eks.yml
@@ -1,5 +1,5 @@
- job:
- name: ps-operator-eks-version
+ name: pso-eks
project-type: pipeline
description: |
Do not edit this job through the web!
@@ -11,4 +11,4 @@
- master
wipe-workspace: false
lightweight-checkout: true
- script-path: cloud/jenkins/ps_operator_eks_version.groovy
+ script-path: cloud/jenkins/pso_eks.groovy
diff --git a/cloud/jenkins/ps-operator-minikube.yml b/cloud/jenkins/pso-gke-2.yml
similarity index 84%
rename from cloud/jenkins/ps-operator-minikube.yml
rename to cloud/jenkins/pso-gke-2.yml
index 78698ce88c..3171805ec9 100644
--- a/cloud/jenkins/ps-operator-minikube.yml
+++ b/cloud/jenkins/pso-gke-2.yml
@@ -1,5 +1,5 @@
- job:
- name: ps-operator-minikube
+ name: pso-gke-2
project-type: pipeline
description: |
Do not edit this job through the web!
@@ -18,4 +18,4 @@
- master
wipe-workspace: false
lightweight-checkout: true
- script-path: cloud/jenkins/ps_operator_minikube.groovy
+ script-path: cloud/jenkins/pso_gke.groovy
diff --git a/cloud/jenkins/ps-operator-gke-latest.yml b/cloud/jenkins/pso-gke.yml
similarity index 83%
rename from cloud/jenkins/ps-operator-gke-latest.yml
rename to cloud/jenkins/pso-gke.yml
index 702116970c..df166f36dc 100644
--- a/cloud/jenkins/ps-operator-gke-latest.yml
+++ b/cloud/jenkins/pso-gke.yml
@@ -1,5 +1,5 @@
- job:
- name: ps-operator-gke-latest
+ name: pso-gke
project-type: pipeline
description: |
Do not edit this job through the web!
@@ -18,4 +18,4 @@
- master
wipe-workspace: false
lightweight-checkout: true
- script-path: cloud/jenkins/ps_operator_gke_latest.groovy
+ script-path: cloud/jenkins/pso_gke.groovy
diff --git a/cloud/jenkins/ps-operator-gke-version.yml b/cloud/jenkins/pso-minikube.yml
similarity index 83%
rename from cloud/jenkins/ps-operator-gke-version.yml
rename to cloud/jenkins/pso-minikube.yml
index 65bcc4a4cc..1e9d28338d 100644
--- a/cloud/jenkins/ps-operator-gke-version.yml
+++ b/cloud/jenkins/pso-minikube.yml
@@ -1,5 +1,5 @@
- job:
- name: ps-operator-gke-version
+ name: pso-minikube
project-type: pipeline
description: |
Do not edit this job through the web!
@@ -18,4 +18,4 @@
- master
wipe-workspace: false
lightweight-checkout: true
- script-path: cloud/jenkins/ps_operator_gke_version.groovy
+ script-path: cloud/jenkins/pso_minikube.groovy
diff --git a/cloud/jenkins/pso-openshift-2.yml b/cloud/jenkins/pso-openshift-2.yml
new file mode 100644
index 0000000000..786dcb9cfd
--- /dev/null
+++ b/cloud/jenkins/pso-openshift-2.yml
@@ -0,0 +1,21 @@
+- job:
+ name: pso-os-2
+ project-type: pipeline
+ description: |
+ Do not edit this job through the web!
+ concurrent: false
+ properties:
+ - build-discarder:
+ days-to-keep: -1
+ num-to-keep: 10
+ artifact-days-to-keep: -1
+ artifact-num-to-keep: 10
+ pipeline-scm:
+ scm:
+ - git:
+ url: https://github.com/Percona-Lab/jenkins-pipelines.git
+ branches:
+ - master
+ wipe-workspace: false
+ lightweight-checkout: true
+ script-path: cloud/jenkins/pso_openshift.groovy
diff --git a/cloud/jenkins/pso-openshift.yml b/cloud/jenkins/pso-openshift.yml
new file mode 100644
index 0000000000..3bb9a983c3
--- /dev/null
+++ b/cloud/jenkins/pso-openshift.yml
@@ -0,0 +1,21 @@
+- job:
+ name: pso-os
+ project-type: pipeline
+ description: |
+ Do not edit this job through the web!
+ concurrent: false
+ properties:
+ - build-discarder:
+ days-to-keep: -1
+ num-to-keep: 10
+ artifact-days-to-keep: -1
+ artifact-num-to-keep: 10
+ pipeline-scm:
+ scm:
+ - git:
+ url: https://github.com/Percona-Lab/jenkins-pipelines.git
+ branches:
+ - master
+ wipe-workspace: false
+ lightweight-checkout: true
+ script-path: cloud/jenkins/pso_openshift.groovy
diff --git a/cloud/jenkins/ps_operator_eks_latest.groovy b/cloud/jenkins/pso_eks.groovy
similarity index 65%
rename from cloud/jenkins/ps_operator_eks_latest.groovy
rename to cloud/jenkins/pso_eks.groovy
index 617d44a856..997a1e6543 100644
--- a/cloud/jenkins/ps_operator_eks_latest.groovy
+++ b/cloud/jenkins/pso_eks.groovy
@@ -1,8 +1,21 @@
region='eu-west-2'
tests=[]
clusters=[]
+release_versions="source/e2e-tests/release_versions"
-void prepareNode() {
+String getParam(String paramName, String keyName = null) {
+ keyName = keyName ?: paramName
+
+ param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim()
+ if ("$param") {
+ echo "$paramName=$param (from params file)"
+ } else {
+ error("$keyName not found in params file $release_versions")
+ }
+ return param
+}
+
+void prepareAgent() {
echo "=========================[ Installing tools on the Jenkins executor ]========================="
sh """
sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
@@ -28,31 +41,46 @@ void prepareNode() {
}
void prepareSources() {
- if ("$PLATFORM_VER" == "latest") {
- withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
- USED_PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", , returnStdout: true).trim()
- }
- } else {
- USED_PLATFORM_VER="$PLATFORM_VER"
- }
- echo "USED_PLATFORM_VER=$USED_PLATFORM_VER"
-
echo "=========================[ Cloning the sources ]========================="
git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
sh """
- # sudo is needed for better node recovery after compilation failure
- # if building failed on compilation stage directory will have files owned by docker user
- sudo git config --global --add safe.directory '*'
- sudo git reset --hard
- sudo git clean -xdf
- sudo rm -rf source
- cloud/local/checkout $GIT_REPO $GIT_BRANCH
+ git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source
"""
- script {
- GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim()
- CLUSTER_NAME = sh(script: "echo jenkins-lat-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim()
- PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim()
+ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim()
+ PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim()
+ CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim()
+}
+
+void initParams() {
+ if ("$PILLAR_VERSION" != "none") {
+ echo "=========================[ Getting parameters for release test ]========================="
+ IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR")
+ IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}")
+ IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}")
+ IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}")
+ IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY")
+ IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR")
+ IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT")
+ IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT")
+ IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER")
+ if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") {
+ PLATFORM_VER = getParam("PLATFORM_VER", "EKS_${PLATFORM_VER}")
+ }
+ } else {
+ echo "=========================[ Not a release run. Using job params only! ]========================="
+ }
+
+ if ("$PLATFORM_VER" == "latest") {
+ withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
+ PLATFORM_VER = sh(script: "aws eks describe-addon-versions --query 'addons[].addonVersions[].compatibilities[].clusterVersion' --output json | jq -r 'flatten | unique | sort | reverse | .[0]'", returnStdout: true).trim()
+ }
+ }
+
+ if ("$IMAGE_MYSQL") {
+ cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW"
+ currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH"
+ currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw"
}
}
@@ -60,7 +88,7 @@ void dockerBuildPush() {
echo "=========================[ Building and Pushing the operator Docker image ]========================="
withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
sh """
- if [[ "$OPERATOR_IMAGE" ]]; then
+ if [[ "$IMAGE_OPERATOR" ]]; then
echo "SKIP: Build is not needed, operator image was set!"
else
cd source
@@ -107,7 +135,7 @@ void initTests() {
for (int i=0; i/dev/null 2>&1", returnStatus: true)
if (retFileExists == 0) {
@@ -124,7 +152,6 @@ void initTests() {
withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) {
sh """
cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml
- chmod 600 source/e2e-tests/conf/cloud-secret.yml
"""
}
stash includes: "source/**", name: "sourceFILES"
@@ -151,60 +178,48 @@ void clusterRunner(String cluster) {
}
void createCluster(String CLUSTER_SUFFIX) {
- clusters.add("$CLUSTER_SUFFIX")
-
sh """
timestamp="\$(date +%s)"
tee cluster-${CLUSTER_SUFFIX}.yaml << EOF
-# An example of ClusterConfig showing nodegroups with mixed instances (spot and on demand):
----
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
-
metadata:
- name: $CLUSTER_NAME-$CLUSTER_SUFFIX
- region: $region
- version: "$USED_PLATFORM_VER"
- tags:
- 'delete-cluster-after-hours': '10'
- 'creation-time': '\$timestamp'
- 'team': 'cloud'
+ name: $CLUSTER_NAME-$CLUSTER_SUFFIX
+ region: $region
+ version: "$PLATFORM_VER"
+ tags:
+ 'delete-cluster-after-hours': '10'
+ 'creation-time': '\$timestamp'
+ 'team': 'cloud'
iam:
withOIDC: true
-
addons:
- name: aws-ebs-csi-driver
wellKnownPolicies:
ebsCSIController: true
-
nodeGroups:
- - name: ng-1
- minSize: 3
- maxSize: 5
- desiredCapacity: 3
- instanceType: "m5.xlarge"
- iam:
- attachPolicyARNs:
- - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
- - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
- - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
- - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
- - arn:aws:iam::aws:policy/AmazonS3FullAccess
- tags:
- 'iit-billing-tag': 'jenkins-eks'
- 'delete-cluster-after-hours': '10'
- 'team': 'cloud'
- 'product': 'ps-operator'
+- name: ng-1
+ minSize: 3
+ maxSize: 5
+ instanceType: 'm5.xlarge'
+ iam:
+ attachPolicyARNs:
+ - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
+ - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
+ - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
+ - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
+ - arn:aws:iam::aws:policy/AmazonS3FullAccess
+ tags:
+ 'iit-billing-tag': 'jenkins-eks'
+ 'delete-cluster-after-hours': '10'
+ 'team': 'cloud'
+ 'product': 'ps-operator'
EOF
"""
- // this is needed for always post action because pipeline runs earch parallel step on another instance
- stash includes: "cluster-${CLUSTER_SUFFIX}.yaml", name: "cluster-$CLUSTER_SUFFIX-config"
-
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh """
export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
- export PATH=/home/ec2-user/.local/bin:\$PATH
eksctl create cluster -f cluster-${CLUSTER_SUFFIX}.yaml
kubectl annotate storageclass gp2 storageclass.kubernetes.io/is-default-class=true
kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user="\$(aws sts get-caller-identity|jq -r '.Arn')"
@@ -228,29 +243,30 @@ void runTest(Integer TEST_ID) {
sh """
cd source
+ export DEBUG_TESTS=1
[[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator
- [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
+ export IMAGE=$IMAGE_OPERATOR
export IMAGE_MYSQL=$IMAGE_MYSQL
- export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ export IMAGE_BACKUP=$IMAGE_BACKUP
export IMAGE_ROUTER=$IMAGE_ROUTER
export IMAGE_HAPROXY=$IMAGE_HAPROXY
- export IMAGE_BACKUP=$IMAGE_BACKUP
+ export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
export IMAGE_TOOLKIT=$IMAGE_TOOLKIT
export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix
- export PATH=\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH
- export PATH=/home/ec2-user/.local/bin:\$PATH
+ export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$"
"""
}
}
- pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
+ pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$PLATFORM_VER-$DB_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
tests[TEST_ID]["result"] = "passed"
return true
}
catch (exc) {
+ echo "Error occurred while running test $testName: $exc"
if (retryCount >= 1) {
currentBuild.result = 'FAILURE'
return true
@@ -280,25 +296,37 @@ void pushArtifactFile(String FILE_NAME) {
}
}
-TestsReport = '\n'
void makeReport() {
echo "=========================[ Generating Test Report ]========================="
- for (int i=0; i<'+ testResult +'/>\n'
+ testsReport = "\n"
+ for (int i = 0; i < tests.size(); i ++) {
+ testsReport += '<'+ tests[i]["result"] +'/>\n'
}
- TestsReport = TestsReport + '\n'
+ testsReport += '\n'
+
+ echo "=========================[ Generating Parameters Report ]========================="
+ pipelineParameters = """
+ testsuite name=$JOB_NAME
+ IMAGE_OPERATOR=$IMAGE_OPERATOR
+ IMAGE_MYSQL=$IMAGE_MYSQL
+ IMAGE_BACKUP=$IMAGE_BACKUP
+ IMAGE_ROUTER=$IMAGE_ROUTER
+ IMAGE_HAPROXY=$IMAGE_HAPROXY
+ IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ IMAGE_TOOLKIT=$IMAGE_TOOLKIT
+ IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
+ IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
+ PLATFORM_VER=$PLATFORM_VER
+ """
+
+ writeFile file: "TestsReport.xml", text: testsReport
+ writeFile file: 'PipelineParameters.txt', text: pipelineParameters
}
void shutdownCluster(String CLUSTER_SUFFIX) {
- unstash "cluster-$CLUSTER_SUFFIX-config"
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'eks-cicd', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh """
export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
- eksctl delete addon --name aws-ebs-csi-driver --cluster $CLUSTER_NAME-$CLUSTER_SUFFIX --region $region || true
for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do
kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true
kubectl delete sts --all -n \$namespace --force --grace-period=0 || true
@@ -341,75 +369,25 @@ void shutdownCluster(String CLUSTER_SUFFIX) {
pipeline {
environment {
- CLOUDSDK_CORE_DISABLE_PROMPTS = 1
- PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim()
+ DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim()
}
parameters {
- choice(
- choices: ['run-release.csv', 'run-distro.csv'],
- description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.',
- name: 'TEST_SUITE')
- text(
- defaultValue: '',
- description: 'List of tests to run separated by new line',
- name: 'TEST_LIST')
- choice(
- choices: 'NO\nYES',
- description: 'Ignore passed tests in previous run (run all)',
- name: 'IGNORE_PREVIOUS_RUN'
- )
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: 'latest',
- description: 'EKS kubernetes version',
- name: 'PLATFORM_VER')
- choice(
- choices: 'YES\nNO',
- description: 'Run tests in cluster wide mode',
- name: 'CLUSTER_WIDE')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
+ choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.')
+ text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line')
+ choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)')
+ choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.')
+ string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository')
+ string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'EKS kubernetes version. If set to min or max, value will be automatically taken from release_versions file.')
+ choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode')
+ string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main')
+ string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql')
+ string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup')
+ string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router')
+ string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy')
+ string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator')
+ string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit')
+ string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest')
+ string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest')
}
agent {
label 'docker'
@@ -418,13 +396,15 @@ pipeline {
buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
skipDefaultCheckout()
disableConcurrentBuilds()
- copyArtifactPermission('ps-operator-latest-scheduler');
+ copyArtifactPermission('weekly-pso');
}
stages {
- stage('Prepare node') {
+ stage('Prepare Node') {
steps {
- prepareNode()
+ script { deleteDir() }
+ prepareAgent()
prepareSources()
+ initParams()
}
}
stage('Docker Build and Push') {
@@ -432,7 +412,7 @@ pipeline {
dockerBuildPush()
}
}
- stage('Init tests') {
+ stage('Init Tests') {
steps {
initTests()
}
@@ -443,58 +423,46 @@ pipeline {
}
parallel {
stage('cluster1') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster1')
}
}
stage('cluster2') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster2')
}
}
stage('cluster3') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster3')
}
}
stage('cluster4') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster4')
}
}
}
-
}
}
post {
always {
echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
makeReport()
- sh """
- echo "$TestsReport" > TestsReport.xml
- """
step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
- archiveArtifacts '*.xml'
+ archiveArtifacts '*.xml,*.txt'
script {
if (currentBuild.result != null && currentBuild.result != 'SUCCESS') {
@@ -503,12 +471,6 @@ pipeline {
clusters.each { shutdownCluster(it) }
}
-
- sh """
- sudo docker system prune --volumes -af
- sudo rm -rf *
- """
- deleteDir()
}
}
}
diff --git a/cloud/jenkins/ps_operator_gke_version.groovy b/cloud/jenkins/pso_gke.groovy
similarity index 60%
rename from cloud/jenkins/ps_operator_gke_version.groovy
rename to cloud/jenkins/pso_gke.groovy
index 2ac3eeacde..e60467d0c6 100644
--- a/cloud/jenkins/ps_operator_gke_version.groovy
+++ b/cloud/jenkins/pso_gke.groovy
@@ -1,8 +1,21 @@
region='us-central1-a'
tests=[]
clusters=[]
+release_versions="source/e2e-tests/release_versions"
-void prepareNode() {
+String getParam(String paramName, String keyName = null) {
+ keyName = keyName ?: paramName
+
+ param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim()
+ if ("$param") {
+ echo "$paramName=$param (from params file)"
+ } else {
+ error("$keyName not found in params file $release_versions")
+ }
+ return param
+}
+
+void prepareAgent() {
echo "=========================[ Installing tools on the Jenkins executor ]========================="
sh """
sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
@@ -35,7 +48,6 @@ EOF
sudo yum install -y google-cloud-cli google-cloud-cli-gke-gcloud-auth-plugin
"""
- echo "=========================[ Logging in the Kubernetes provider ]========================="
withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) {
sh """
gcloud auth activate-service-account --key-file $CLIENT_SECRET_FILE
@@ -45,29 +57,47 @@ EOF
}
void prepareSources() {
- if ("$PLATFORM_VER" == "latest") {
- USED_PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=RAPID' --format='value(channels.validVersions)' | cut -d- -f1", , returnStdout: true).trim()
- } else {
- USED_PLATFORM_VER="$PLATFORM_VER"
- }
- echo "USED_PLATFORM_VER=$USED_PLATFORM_VER"
-
echo "=========================[ Cloning the sources ]========================="
git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
sh """
- # sudo is needed for better node recovery after compilation failure
- # if building failed on compilation stage directory will have files owned by docker user
- sudo git config --global --add safe.directory '*'
- sudo git reset --hard
- sudo git clean -xdf
- sudo rm -rf source
- cloud/local/checkout $GIT_REPO $GIT_BRANCH
+ git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source
"""
- script {
- GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', , returnStdout: true).trim()
- CLUSTER_NAME = sh(script: "echo jenkins-ver-ps-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", , returnStdout: true).trim()
- PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$USED_PLATFORM_VER-$OPERATOR_IMAGE-$IMAGE_MYSQL-$IMAGE_ORCHESTRATOR-$IMAGE_ROUTER-$IMAGE_BACKUP-$IMAGE_TOOLKIT-$IMAGE_HAPROXY-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", , returnStdout: true).trim()
+ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim()
+ PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$GKE_RELEASE_CHANNEL-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim()
+ CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim()
+}
+
+void initParams() {
+ if ("$PILLAR_VERSION" != "none") {
+ echo "=========================[ Getting parameters for release test ]========================="
+ GKE_RELEASE_CHANNEL = "stable"
+ echo "Forcing GKE_RELEASE_CHANNEL=stable, because it's a release run!"
+
+ IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR")
+ IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}")
+ IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}")
+ IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}")
+ IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY")
+ IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR")
+ IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT")
+ IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT")
+ IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER")
+ if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") {
+ PLATFORM_VER = getParam("PLATFORM_VER", "GKE_${PLATFORM_VER}")
+ }
+ } else {
+ echo "=========================[ Not a release run. Using job params only! ]========================="
+ }
+
+ if ("$PLATFORM_VER" == "latest") {
+ PLATFORM_VER = sh(script: "gcloud container get-server-config --region=$region --flatten=channels --filter='channels.channel=$GKE_RELEASE_CHANNEL' --format='value(channels.validVersions)' | cut -d- -f1", returnStdout: true).trim()
+ }
+
+ if ("$IMAGE_MYSQL") {
+ cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW"
+ currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH"
+ currentBuild.description = "$PLATFORM_VER-$GKE_RELEASE_CHANNEL " + "$IMAGE_MYSQL".split(":")[1] + " $cw"
}
}
@@ -75,7 +105,7 @@ void dockerBuildPush() {
echo "=========================[ Building and Pushing the operator Docker image ]========================="
withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
sh """
- if [[ "$OPERATOR_IMAGE" ]]; then
+ if [[ "$IMAGE_OPERATOR" ]]; then
echo "SKIP: Build is not needed, operator image was set!"
else
cd source
@@ -122,7 +152,7 @@ void initTests() {
for (int i=0; i/dev/null 2>&1", returnStatus: true)
if (retFileExists == 0) {
@@ -165,38 +195,49 @@ void clusterRunner(String cluster) {
}
void createCluster(String CLUSTER_SUFFIX) {
- clusters.add("$CLUSTER_SUFFIX")
-
withCredentials([string(credentialsId: 'GCP_PROJECT_ID', variable: 'GCP_PROJECT'), file(credentialsId: 'gcloud-key-file', variable: 'CLIENT_SECRET_FILE')]) {
sh """
export KUBECONFIG=/tmp/$CLUSTER_NAME-$CLUSTER_SUFFIX
-
maxRetries=15
exitCode=1
+
while [[ \$exitCode != 0 && \$maxRetries > 0 ]]; do
- ret_val=0
gcloud container clusters create $CLUSTER_NAME-$CLUSTER_SUFFIX \
--release-channel $GKE_RELEASE_CHANNEL \
--zone $region \
- --cluster-version $USED_PLATFORM_VER \
- --machine-type n1-standard-4 \
+ --cluster-version $PLATFORM_VER \
--preemptible \
--disk-size 30 \
- --num-nodes=3 \
- --network=jenkins-ps-vpc \
- --subnetwork=jenkins-ps-$CLUSTER_SUFFIX \
- --no-enable-autoupgrade \
+ --machine-type n1-standard-4 \
+ --num-nodes=4 \
+ --min-nodes=4 \
+ --max-nodes=6 \
+ --network=jenkins-vpc \
+ --subnetwork=jenkins-$CLUSTER_SUFFIX \
--cluster-ipv4-cidr=/21 \
- --labels delete-cluster-after-hours=6 &&\
- kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user jenkins@"$GCP_PROJECT".iam.gserviceaccount.com
+ --labels delete-cluster-after-hours=6 \
+ --enable-ip-alias &&\
+ kubectl create clusterrolebinding cluster-admin-binding1 --clusterrole=cluster-admin --user=\$(gcloud config get-value core/account)
exitCode=\$?
if [[ \$exitCode == 0 ]]; then break; fi
(( maxRetries -- ))
sleep 1
done
if [[ \$exitCode != 0 ]]; then exit \$exitCode; fi
+
+ CURRENT_TIME=\$(date --rfc-3339=seconds)
+ FUTURE_TIME=\$(date -d '6 hours' --rfc-3339=seconds)
+
+ # On the STABLE release channel, node pools must keep auto-upgrade enabled and it cannot be disabled manually,
+ # so --no-enable-autoupgrade is not usable in the command above; instead, a maintenance exclusion window is added as a workaround.
+ gcloud container clusters update $CLUSTER_NAME-$CLUSTER_SUFFIX \
+ --zone $region \
+ --add-maintenance-exclusion-start "\$CURRENT_TIME" \
+ --add-maintenance-exclusion-end "\$FUTURE_TIME"
+
+ kubectl get nodes -o custom-columns="NAME:.metadata.name,TAINTS:.spec.taints,AGE:.metadata.creationTimestamp"
"""
- }
+ }
}
void runTest(Integer TEST_ID) {
@@ -214,13 +255,14 @@ void runTest(Integer TEST_ID) {
sh """
cd source
+ export DEBUG_TESTS=1
[[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator
- [[ "$OPERATOR_IMAGE" ]] && export IMAGE=$OPERATOR_IMAGE || export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
+ export IMAGE=$IMAGE_OPERATOR
export IMAGE_MYSQL=$IMAGE_MYSQL
- export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ export IMAGE_BACKUP=$IMAGE_BACKUP
export IMAGE_ROUTER=$IMAGE_ROUTER
export IMAGE_HAPROXY=$IMAGE_HAPROXY
- export IMAGE_BACKUP=$IMAGE_BACKUP
+ export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
export IMAGE_TOOLKIT=$IMAGE_TOOLKIT
export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
@@ -230,11 +272,12 @@ void runTest(Integer TEST_ID) {
kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$"
"""
}
- pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$USED_PLATFORM_VER-$PS_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
+ pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$PLATFORM_VER-$DB_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
tests[TEST_ID]["result"] = "passed"
return true
}
catch (exc) {
+ echo "Error occurred while running test $testName: $exc"
if (retryCount >= 1) {
currentBuild.result = 'FAILURE'
return true
@@ -264,17 +307,31 @@ void pushArtifactFile(String FILE_NAME) {
}
}
-TestsReport = '\n'
void makeReport() {
echo "=========================[ Generating Test Report ]========================="
- for (int i=0; i<'+ testResult +'/>\n'
+ testsReport = "\n"
+ for (int i = 0; i < tests.size(); i ++) {
+ testsReport += '<'+ tests[i]["result"] +'/>\n'
}
- TestsReport = TestsReport + '\n'
+ testsReport += '\n'
+
+ echo "=========================[ Generating Parameters Report ]========================="
+ pipelineParameters = """
+ testsuite name=$JOB_NAME
+ IMAGE_OPERATOR=$IMAGE_OPERATOR
+ IMAGE_MYSQL=$IMAGE_MYSQL
+ IMAGE_BACKUP=$IMAGE_BACKUP
+ IMAGE_ROUTER=$IMAGE_ROUTER
+ IMAGE_HAPROXY=$IMAGE_HAPROXY
+ IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ IMAGE_TOOLKIT=$IMAGE_TOOLKIT
+ IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
+ IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
+ PLATFORM_VER=$PLATFORM_VER
+ """
+
+ writeFile file: "TestsReport.xml", text: testsReport
+ writeFile file: 'PipelineParameters.txt', text: pipelineParameters
}
void shutdownCluster(String CLUSTER_SUFFIX) {
@@ -297,79 +354,26 @@ void shutdownCluster(String CLUSTER_SUFFIX) {
pipeline {
environment {
- CLOUDSDK_CORE_DISABLE_PROMPTS = 1
- PS_TAG = sh(script: "[[ \"$IMAGE_MYSQL\" ]] && echo $IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", , returnStdout: true).trim()
+ DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim()
}
parameters {
- choice(
- choices: ['run-release.csv', 'run-distro.csv'],
- description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.',
- name: 'TEST_SUITE')
- text(
- defaultValue: '',
- description: 'List of tests to run separated by new line',
- name: 'TEST_LIST')
- choice(
- choices: 'NO\nYES',
- description: 'Ignore passed tests in previous run (run all)',
- name: 'IGNORE_PREVIOUS_RUN'
- )
- string(
- defaultValue: 'main',
- description: 'Tag/Branch for percona/percona-server-mysql-operator repository',
- name: 'GIT_BRANCH')
- string(
- defaultValue: 'https://github.com/percona/percona-server-mysql-operator',
- description: 'percona-server-mysql-operator repository',
- name: 'GIT_REPO')
- string(
- defaultValue: 'latest',
- description: 'GKE version',
- name: 'PLATFORM_VER')
- choice(
- choices: 'None\nstable\nregular\nrapid',
- description: 'GKE release channel',
- name: 'GKE_RELEASE_CHANNEL')
- choice(
- choices: 'YES\nNO',
- description: 'Run tests in cluster wide mode',
- name: 'CLUSTER_WIDE')
- string(
- defaultValue: '',
- description: 'Operator image: perconalab/percona-server-mysql-operator:main',
- name: 'OPERATOR_IMAGE')
- string(
- defaultValue: '',
- description: 'PS for MySQL image: perconalab/percona-server-mysql-operator:main-ps8.0',
- name: 'IMAGE_MYSQL')
- string(
- defaultValue: '',
- description: 'Orchestrator image: perconalab/percona-server-mysql-operator:main-orchestrator',
- name: 'IMAGE_ORCHESTRATOR')
- string(
- defaultValue: '',
- description: 'MySQL Router image: perconalab/percona-server-mysql-operator:main-router',
- name: 'IMAGE_ROUTER')
- string(
- defaultValue: '',
- description: 'XtraBackup image: perconalab/percona-server-mysql-operator:main-backup',
- name: 'IMAGE_BACKUP')
- string(
- defaultValue: '',
- description: 'Toolkit image: perconalab/percona-server-mysql-operator:main-toolkit',
- name: 'IMAGE_TOOLKIT')
- string(
- defaultValue: '',
- description: 'HAProxy image: perconalab/percona-server-mysql-operator:main-haproxy',
- name: 'IMAGE_HAPROXY')
- string(
- defaultValue: '',
- description: 'PMM client image: perconalab/pmm-client:dev-latest',
- name: 'IMAGE_PMM_CLIENT')
- string(
- defaultValue: '',
- description: 'PMM server image: perconalab/pmm-server:dev-latest',
- name: 'IMAGE_PMM_SERVER')
+ choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.')
+ text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line')
+ choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)')
+ choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.')
+ string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository')
+ string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'GKE kubernetes version. If set to min or max, value will be automatically taken from release_versions file.')
+ choice(name: 'GKE_RELEASE_CHANNEL', choices: 'rapid\nstable\nregular\nNone', description: 'GKE release channel. Will be forced to stable for release run.')
+ choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode')
+ string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main')
+ string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql')
+ string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup')
+ string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router')
+ string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy')
+ string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator')
+ string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit')
+ string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest')
+ string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest')
}
agent {
label 'docker'
@@ -378,13 +382,15 @@ pipeline {
buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
skipDefaultCheckout()
disableConcurrentBuilds()
- copyArtifactPermission('ps-operator-latest-scheduler');
+ copyArtifactPermission('weekly-pso');
}
stages {
- stage('Prepare node') {
+ stage('Prepare Node') {
steps {
- prepareNode()
+ script { deleteDir() }
+ prepareAgent()
prepareSources()
+ initParams()
}
}
stage('Docker Build and Push') {
@@ -392,7 +398,7 @@ pipeline {
dockerBuildPush()
}
}
- stage('Init tests') {
+ stage('Init Tests') {
steps {
initTests()
}
@@ -403,55 +409,37 @@ pipeline {
}
parallel {
stage('cluster1') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster1')
}
}
stage('cluster2') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster2')
}
}
stage('cluster3') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster3')
}
}
stage('cluster4') {
- agent {
- label 'docker'
- }
+ agent { label 'docker' }
steps {
- prepareNode()
+ prepareAgent()
unstash "sourceFILES"
clusterRunner('cluster4')
}
}
- stage('cluster5') {
- agent {
- label 'docker'
- }
- steps {
- prepareNode()
- unstash "sourceFILES"
- clusterRunner('cluster5')
- }
- }
}
}
}
@@ -459,11 +447,8 @@ pipeline {
always {
echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
makeReport()
- sh """
- echo "$TestsReport" > TestsReport.xml
- """
step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
- archiveArtifacts '*.xml'
+ archiveArtifacts '*.xml,*.txt'
script {
if (currentBuild.result != null && currentBuild.result != 'SUCCESS') {
@@ -472,12 +457,6 @@ pipeline {
clusters.each { shutdownCluster(it) }
}
-
- sh """
- sudo docker system prune --volumes -af
- sudo rm -rf *
- """
- deleteDir()
}
}
}
diff --git a/cloud/jenkins/pso_minikube.groovy b/cloud/jenkins/pso_minikube.groovy
new file mode 100644
index 0000000000..acd9c18eaf
--- /dev/null
+++ b/cloud/jenkins/pso_minikube.groovy
@@ -0,0 +1,344 @@
+tests=[]
+release_versions="source/e2e-tests/release_versions"
+
+String getParam(String paramName, String keyName = null) {
+ keyName = keyName ?: paramName
+
+ param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim()
+ if ("$param") {
+ echo "$paramName=$param (from params file)"
+ } else {
+ error("$keyName not found in params file $release_versions")
+ }
+ return param
+}
+
+void prepareAgent() {
+ echo "=========================[ Installing tools on the Jenkins executor ]========================="
+ sh """
+ sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
+ kubectl version --client --output=yaml
+
+ curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm
+
+ sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq
+ sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq
+
+ curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf -
+ ./krew-linux_amd64 install krew
+ export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
+
+ kubectl krew install assert
+
+ # v0.17.0 kuttl version
+ kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml
+ echo \$(kubectl kuttl --version) is installed
+
+ sudo curl -sLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x /usr/local/bin/minikube
+ """
+}
+
+void prepareSources() {
+ echo "=========================[ Cloning the sources ]========================="
+ git branch: 'master', url: 'https://github.com/Percona-Lab/jenkins-pipelines'
+ sh """
+ git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator source
+ """
+
+ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim()
+ PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim()
+}
+
+void initParams() {
+ if ("$PILLAR_VERSION" != "none") {
+ echo "=========================[ Getting parameters for release test ]========================="
+ IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR")
+ IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}")
+ IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}")
+ IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}")
+ IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY")
+ IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR")
+ IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT")
+ IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT")
+ IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER")
+ if ("$PLATFORM_VER".toLowerCase() == "max") {
+ PLATFORM_VER = getParam("PLATFORM_VER", "MINIKUBE_${PLATFORM_VER}")
+ }
+ } else {
+ echo "=========================[ Not a release run. Using job params only! ]========================="
+ }
+
+ if ("$IMAGE_MYSQL") {
+ cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW"
+ currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH"
+ currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw"
+ }
+}
+
+void dockerBuildPush() {
+ echo "=========================[ Building and Pushing the operator Docker image ]========================="
+ withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
+ sh """
+ if [[ "$IMAGE_OPERATOR" ]]; then
+ echo "SKIP: Build is not needed, operator image was set!"
+ else
+ cd source
+ sg docker -c "
+ docker login -u '$USER' -p '$PASS'
+ export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
+ e2e-tests/build
+ docker logout
+ "
+ sudo rm -rf build
+ fi
+ """
+ }
+}
+
+void initTests() {
+ echo "=========================[ Initializing the tests ]========================="
+
+ echo "Populating tests into the tests array!"
+ def testList = "$TEST_LIST"
+ def suiteFileName = "source/e2e-tests/$TEST_SUITE"
+
+ if (testList.length() != 0) {
+ suiteFileName = 'source/e2e-tests/run-custom.csv'
+ sh """
+ echo -e "$testList" > $suiteFileName
+ echo "Custom test suite contains following tests:"
+ cat $suiteFileName
+ """
+ }
+
+ def records = readCSV file: suiteFileName
+
+ for (int i=0; i/dev/null 2>&1", returnStatus: true)
+
+ if (retFileExists == 0) {
+ tests[i]["result"] = "passed"
+ }
+ }
+ } else {
+ sh """
+ aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || :
+ """
+ }
+ }
+
+ withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE')]) {
+ sh """
+ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml
+ """
+ }
+}
+
+void clusterRunner(String cluster) {
+ def clusterCreated=0
+
+ for (int i=0; i= 1) {
+ currentBuild.result = 'FAILURE'
+ return true
+ }
+ retryCount++
+ return false
+ }
+ finally {
+ def timeStop = new Date().getTime()
+ def durationSec = (timeStop - timeStart) / 1000
+ tests[TEST_ID]["time"] = durationSec
+ echo "The $testName test was finished!"
+ }
+ }
+}
+
+void pushArtifactFile(String FILE_NAME) {
+ echo "Push $FILE_NAME file to S3!"
+
+ withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
+ sh """
+ touch $FILE_NAME
+ S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT
+ aws s3 ls \$S3_PATH/$FILE_NAME || :
+ aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || :
+ """
+ }
+}
+
+void makeReport() {
+ echo "=========================[ Generating Test Report ]========================="
+ testsReport = "\n"
+ for (int i = 0; i < tests.size(); i ++) {
+ testsReport += '<'+ tests[i]["result"] +'/>\n'
+ }
+ testsReport += '\n'
+
+ echo "=========================[ Generating Parameters Report ]========================="
+ pipelineParameters = """
+ testsuite name=$JOB_NAME
+ IMAGE_OPERATOR=$IMAGE_OPERATOR
+ IMAGE_MYSQL=$IMAGE_MYSQL
+ IMAGE_BACKUP=$IMAGE_BACKUP
+ IMAGE_ROUTER=$IMAGE_ROUTER
+ IMAGE_HAPROXY=$IMAGE_HAPROXY
+ IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ IMAGE_TOOLKIT=$IMAGE_TOOLKIT
+ IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
+ IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
+ PLATFORM_VER=$PLATFORM_VER
+ """
+
+ writeFile file: "TestsReport.xml", text: testsReport
+ writeFile file: 'PipelineParameters.txt', text: pipelineParameters
+}
+
+pipeline {
+ environment {
+ DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim()
+ }
+ parameters {
+ choice(name: 'TEST_SUITE', choices: ['run-minikube.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.')
+ text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line')
+ choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)')
+ choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.')
+ string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository')
+ string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'Minikube kubernetes version. If set to max, value will be automatically taken from release_versions file.')
+ choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode')
+ string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main')
+ string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql')
+ string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup')
+ string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router')
+ string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy')
+ string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator')
+ string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit')
+ string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest')
+ string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest')
+ }
+ agent {
+ label 'docker-32gb'
+ }
+ options {
+ buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
+ skipDefaultCheckout()
+ disableConcurrentBuilds()
+ copyArtifactPermission('weekly-pso');
+ }
+ stages {
+ stage('Prepare Node') {
+ steps {
+ script { deleteDir() }
+ prepareSources()
+ initParams()
+ }
+ }
+ stage('Docker Build and Push') {
+ steps {
+ dockerBuildPush()
+ }
+ }
+ stage('Init Tests') {
+ steps {
+ initTests()
+ }
+ }
+ stage('Run Tests') {
+ options {
+ timeout(time: 3, unit: 'HOURS')
+ }
+ parallel {
+ stage('cluster1') {
+ steps {
+ prepareAgent()
+ clusterRunner('cluster1')
+ }
+ }
+ }
+ }
+ }
+ post {
+ always {
+ echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
+ makeReport()
+ step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
+ archiveArtifacts '*.xml,*.txt'
+
+ script {
+ if (currentBuild.result != null && currentBuild.result != 'SUCCESS') {
+ slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL"
+ }
+ }
+ }
+ }
+}
diff --git a/cloud/jenkins/pso_openshift.groovy b/cloud/jenkins/pso_openshift.groovy
new file mode 100644
index 0000000000..a3011cd84b
--- /dev/null
+++ b/cloud/jenkins/pso_openshift.groovy
@@ -0,0 +1,468 @@
+region='eu-west-2'
+tests=[]
+clusters=[]
+release_versions="source/e2e-tests/release_versions"
+
+String getParam(String paramName, String keyName = null) {
+ keyName = keyName ?: paramName
+
+ param = sh(script: "grep -iE '^\\s*$keyName=' $release_versions | cut -d = -f 2 | tr -d \'\"\'| tail -1", returnStdout: true).trim()
+ if ("$param") {
+ echo "$paramName=$param (from params file)"
+ } else {
+ error("$keyName not found in params file $release_versions")
+ }
+ return param
+}
+
+void prepareAgent() {
+ echo "=========================[ Installing tools on the Jenkins executor ]========================="
+ sh """
+ sudo curl -s -L -o /usr/local/bin/kubectl https://dl.k8s.io/release/\$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x /usr/local/bin/kubectl
+ kubectl version --client --output=yaml
+
+ curl -fsSL https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz | sudo tar -C /usr/local/bin --strip-components 1 -xzf - linux-amd64/helm
+
+ sudo curl -fsSL https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 -o /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq
+ sudo curl -fsSL https://github.com/jqlang/jq/releases/download/jq-1.7.1/jq-linux64 -o /usr/local/bin/jq && sudo chmod +x /usr/local/bin/jq
+
+ curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew-linux_amd64.tar.gz | tar -xzf -
+ ./krew-linux_amd64 install krew
+ export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
+
+ kubectl krew install assert
+
+ # v0.17.0 kuttl version
+ kubectl krew install --manifest-url https://raw.githubusercontent.com/kubernetes-sigs/krew-index/336ef83542fd2f783bfa2c075b24599e834dcc77/plugins/kuttl.yaml
+ echo \$(kubectl kuttl --version) is installed
+
+ curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$OC_VER/openshift-client-linux.tar.gz | sudo tar -C /usr/local/bin -xzf - oc
+ curl -s -L https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$PLATFORM_VER/openshift-install-linux.tar.gz | sudo tar -C /usr/local/bin -xzf - openshift-install
+ """
+}
+
+void prepareSources() {
+ echo "=========================[ Cloning the sources ]========================="
+ sh """
+ git clone -b $GIT_BRANCH https://github.com/percona/percona-server-mysql-operator.git source
+ """
+ GIT_SHORT_COMMIT = sh(script: 'git -C source rev-parse --short HEAD', returnStdout: true).trim()
+ PARAMS_HASH = sh(script: "echo $GIT_BRANCH-$GIT_SHORT_COMMIT-$PLATFORM_VER-$CLUSTER_WIDE-$IMAGE_OPERATOR-$IMAGE_MYSQL-$IMAGE_BACKUP-$IMAGE_ROUTER-$IMAGE_HAPROXY-$IMAGE_ORCHESTRATOR-$IMAGE_TOOLKIT-$IMAGE_PMM_CLIENT-$IMAGE_PMM_SERVER | md5sum | cut -d' ' -f1", returnStdout: true).trim()
+ CLUSTER_NAME = sh(script: "echo $JOB_NAME-$GIT_SHORT_COMMIT | tr '[:upper:]' '[:lower:]'", returnStdout: true).trim()
+}
+
+void initParams() {
+ if ("$PILLAR_VERSION" != "none") {
+ echo "=========================[ Getting parameters for release test ]========================="
+ IMAGE_OPERATOR = IMAGE_OPERATOR ?: getParam("IMAGE_OPERATOR")
+ IMAGE_MYSQL = IMAGE_MYSQL ?: getParam("IMAGE_MYSQL", "IMAGE_MYSQL${PILLAR_VERSION}")
+ IMAGE_BACKUP = IMAGE_BACKUP ?: getParam("IMAGE_BACKUP", "IMAGE_BACKUP${PILLAR_VERSION}")
+ IMAGE_ROUTER = IMAGE_ROUTER ?: getParam("IMAGE_ROUTER", "IMAGE_ROUTER${PILLAR_VERSION}")
+ IMAGE_HAPROXY = IMAGE_HAPROXY ?: getParam("IMAGE_HAPROXY")
+ IMAGE_ORCHESTRATOR = IMAGE_ORCHESTRATOR ?: getParam("IMAGE_ORCHESTRATOR")
+ IMAGE_TOOLKIT = IMAGE_TOOLKIT ?: getParam("IMAGE_TOOLKIT")
+ IMAGE_PMM_CLIENT = IMAGE_PMM_CLIENT ?: getParam("IMAGE_PMM_CLIENT")
+ IMAGE_PMM_SERVER = IMAGE_PMM_SERVER ?: getParam("IMAGE_PMM_SERVER")
+ if ("$PLATFORM_VER".toLowerCase() == "min" || "$PLATFORM_VER".toLowerCase() == "max") {
+ PLATFORM_VER = getParam("PLATFORM_VER", "OPENSHIFT_${PLATFORM_VER}")
+ }
+ } else {
+ echo "=========================[ Not a release run. Using job params only! ]========================="
+ }
+
+ if ("$PLATFORM_VER" == "latest") {
+ OC_VER = "4.15.25"
+ PLATFORM_VER = sh(script: "curl -s https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/$PLATFORM_VER/release.txt | sed -n 's/^\\s*Version:\\s\\+\\(\\S\\+\\)\\s*\$/\\1/p'", returnStdout: true).trim()
+ } else {
+ if ("$PLATFORM_VER".tokenize('.').collect { it.padLeft(2, '0') }.join('.') <= "04.15.25") {
+ OC_VER="$PLATFORM_VER"
+ } else {
+ OC_VER="4.15.25"
+ }
+ }
+ echo "OC_VER=$OC_VER"
+
+ if ("$IMAGE_MYSQL") {
+ cw = ("$CLUSTER_WIDE" == "YES") ? "CW" : "NON-CW"
+ currentBuild.displayName = "#" + currentBuild.number + " $GIT_BRANCH"
+ currentBuild.description = "$PLATFORM_VER " + "$IMAGE_MYSQL".split(":")[1] + " $cw"
+ }
+}
+
+void dockerBuildPush() {
+ echo "=========================[ Building and Pushing the operator Docker image ]========================="
+ withCredentials([usernamePassword(credentialsId: 'hub.docker.com', passwordVariable: 'PASS', usernameVariable: 'USER')]) {
+ sh """
+ if [[ "$IMAGE_OPERATOR" ]]; then
+ echo "SKIP: Build is not needed, operator image was set!"
+ else
+ cd source
+ sg docker -c "
+ docker login -u '$USER' -p '$PASS'
+ export IMAGE=perconalab/percona-server-mysql-operator:$GIT_BRANCH
+ e2e-tests/build
+ docker logout
+ "
+ sudo rm -rf build
+ fi
+ """
+ }
+}
+
+void initTests() {
+ echo "=========================[ Initializing the tests ]========================="
+
+ echo "Populating tests into the tests array!"
+ def testList = "$TEST_LIST"
+ def suiteFileName = "source/e2e-tests/$TEST_SUITE"
+
+ if (testList.length() != 0) {
+ suiteFileName = 'source/e2e-tests/run-custom.csv'
+ sh """
+ echo -e "$testList" > $suiteFileName
+ echo "Custom test suite contains following tests:"
+ cat $suiteFileName
+ """
+ }
+
+ def records = readCSV file: suiteFileName
+
+ for (int i=0; i/dev/null 2>&1", returnStatus: true)
+
+ if (retFileExists == 0) {
+ tests[i]["result"] = "passed"
+ }
+ }
+ } else {
+ sh """
+ aws s3 rm "s3://percona-jenkins-artifactory/$JOB_NAME/$GIT_SHORT_COMMIT/" --recursive --exclude "*" --include "*-$PARAMS_HASH" || :
+ """
+ }
+ }
+
+ withCredentials([file(credentialsId: 'cloud-secret-file-ps', variable: 'CLOUD_SECRET_FILE'), file(credentialsId: 'cloud-minio-secret-file', variable: 'CLOUD_MINIO_SECRET_FILE')]) {
+ sh """
+ cp $CLOUD_SECRET_FILE source/e2e-tests/conf/cloud-secret.yml
+ cp $CLOUD_MINIO_SECRET_FILE source/e2e-tests/conf/cloud-secret-minio-gw.yml
+ """
+ }
+ stash includes: "source/**", name: "sourceFILES"
+}
+
+void clusterRunner(String cluster) {
+ def clusterCreated=0
+
+ for (int i=0; i= 1) {
+ shutdownCluster(cluster)
+ }
+}
+
+void createCluster(String CLUSTER_SUFFIX) {
+ clusters.add("$CLUSTER_SUFFIX")
+
+ withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift4-secrets', variable: 'OPENSHIFT_CONF_FILE')]) {
+ sh """
+ mkdir -p openshift/$CLUSTER_SUFFIX
+ timestamp="\$(date +%s)"
+tee openshift/$CLUSTER_SUFFIX/install-config.yaml << EOF
+additionalTrustBundlePolicy: Proxyonly
+credentialsMode: Mint
+apiVersion: v1
+baseDomain: cd.percona.com
+compute:
+- architecture: amd64
+ hyperthreading: Enabled
+ name: worker
+ platform:
+ aws:
+ type: m5.2xlarge
+ replicas: 3
+controlPlane:
+ architecture: amd64
+ hyperthreading: Enabled
+ name: master
+ platform: {}
+ replicas: 1
+metadata:
+ creationTimestamp: null
+ name: $CLUSTER_NAME-$CLUSTER_SUFFIX
+networking:
+ clusterNetwork:
+ - cidr: 10.128.0.0/14
+ hostPrefix: 23
+ machineNetwork:
+ - cidr: 10.0.0.0/16
+ networkType: OVNKubernetes
+ serviceNetwork:
+ - 172.30.0.0/16
+platform:
+ aws:
+ region: $region
+ userTags:
+ iit-billing-tag: openshift
+ delete-cluster-after-hours: 8
+ team: cloud
+ product: ps-operator
+ creation-time: \$timestamp
+
+publish: External
+EOF
+ cat $OPENSHIFT_CONF_FILE >> openshift/$CLUSTER_SUFFIX/install-config.yaml
+ """
+
+ sshagent(['aws-openshift-41-key']) {
+ sh """
+ /usr/local/bin/openshift-install create cluster --dir=openshift/$CLUSTER_SUFFIX
+ export KUBECONFIG=openshift/$CLUSTER_SUFFIX/auth/kubeconfig
+ """
+ }
+ }
+}
+
+void runTest(Integer TEST_ID) {
+ def retryCount = 0
+ def testName = tests[TEST_ID]["name"]
+ def clusterSuffix = tests[TEST_ID]["cluster"]
+
+ waitUntil {
+ def timeStart = new Date().getTime()
+ try {
+ echo "The $testName test was started on cluster $CLUSTER_NAME-$clusterSuffix !"
+ tests[TEST_ID]["result"] = "failure"
+
+ timeout(time: 90, unit: 'MINUTES') {
+ sh """
+ cd source
+
+ export DEBUG_TESTS=1
+ [[ "$CLUSTER_WIDE" == "YES" ]] && export OPERATOR_NS=ps-operator
+ export IMAGE=$IMAGE_OPERATOR
+ export IMAGE_MYSQL=$IMAGE_MYSQL
+ export IMAGE_BACKUP=$IMAGE_BACKUP
+ export IMAGE_ROUTER=$IMAGE_ROUTER
+ export IMAGE_HAPROXY=$IMAGE_HAPROXY
+ export IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ export IMAGE_TOOLKIT=$IMAGE_TOOLKIT
+ export IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
+ export IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
+ export KUBECONFIG=/tmp/$CLUSTER_NAME-$clusterSuffix
+ export PATH="\${KREW_ROOT:-\$HOME/.krew}/bin:\$PATH"
+
+ kubectl kuttl test --config e2e-tests/kuttl.yaml --test "^$testName\$"
+ """
+ }
+ pushArtifactFile("$GIT_BRANCH-$GIT_SHORT_COMMIT-$testName-$PLATFORM_VER-$DB_TAG-CW_$CLUSTER_WIDE-$PARAMS_HASH")
+ tests[TEST_ID]["result"] = "passed"
+ return true
+ }
+ catch (exc) {
+ echo "Error occurred while running test $testName: $exc"
+ if (retryCount >= 1) {
+ currentBuild.result = 'FAILURE'
+ return true
+ }
+ retryCount++
+ return false
+ }
+ finally {
+ def timeStop = new Date().getTime()
+ def durationSec = (timeStop - timeStart) / 1000
+ tests[TEST_ID]["time"] = durationSec
+ echo "The $testName test was finished!"
+ }
+ }
+}
+
+void pushArtifactFile(String FILE_NAME) {
+ echo "Push $FILE_NAME file to S3!"
+
+ withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AMI/OVF', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
+ sh """
+ touch $FILE_NAME
+ S3_PATH=s3://percona-jenkins-artifactory/\$JOB_NAME/$GIT_SHORT_COMMIT
+ aws s3 ls \$S3_PATH/$FILE_NAME || :
+ aws s3 cp --quiet $FILE_NAME \$S3_PATH/$FILE_NAME || :
+ """
+ }
+}
+
+void makeReport() {
+ echo "=========================[ Generating Test Report ]========================="
+ testsReport = "\n"
+ for (int i = 0; i < tests.size(); i ++) {
+ testsReport += '<'+ tests[i]["result"] +'/>\n'
+ }
+ testsReport += '\n'
+
+ echo "=========================[ Generating Parameters Report ]========================="
+ pipelineParameters = """
+ testsuite name=$JOB_NAME
+ IMAGE_OPERATOR=$IMAGE_OPERATOR
+ IMAGE_MYSQL=$IMAGE_MYSQL
+ IMAGE_BACKUP=$IMAGE_BACKUP
+ IMAGE_ROUTER=$IMAGE_ROUTER
+ IMAGE_HAPROXY=$IMAGE_HAPROXY
+ IMAGE_ORCHESTRATOR=$IMAGE_ORCHESTRATOR
+ IMAGE_TOOLKIT=$IMAGE_TOOLKIT
+ IMAGE_PMM_CLIENT=$IMAGE_PMM_CLIENT
+ IMAGE_PMM_SERVER=$IMAGE_PMM_SERVER
+ PLATFORM_VER=$PLATFORM_VER
+ """
+
+ writeFile file: "TestsReport.xml", text: testsReport
+ writeFile file: 'PipelineParameters.txt', text: pipelineParameters
+}
+
+void shutdownCluster(String CLUSTER_SUFFIX) {
+ withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'openshift-cicd'], file(credentialsId: 'aws-openshift-41-key-pub', variable: 'AWS_NODES_KEY_PUB'), file(credentialsId: 'openshift-secret-file', variable: 'OPENSHIFT_CONF_FILE')]) {
+ sshagent(['aws-openshift-41-key']) {
+ sh """
+ export KUBECONFIG=$WORKSPACE/openshift/$CLUSTER_SUFFIX/auth/kubeconfig
+ for namespace in \$(kubectl get namespaces --no-headers | awk '{print \$1}' | grep -vE "^kube-|^openshift" | sed '/-operator/ s/^/1-/' | sort | sed 's/^1-//'); do
+ kubectl delete deployments --all -n \$namespace --force --grace-period=0 || true
+ kubectl delete sts --all -n \$namespace --force --grace-period=0 || true
+ kubectl delete replicasets --all -n \$namespace --force --grace-period=0 || true
+ kubectl delete poddisruptionbudget --all -n \$namespace --force --grace-period=0 || true
+ kubectl delete services --all -n \$namespace --force --grace-period=0 || true
+ kubectl delete pods --all -n \$namespace --force --grace-period=0 || true
+ done
+ kubectl get svc --all-namespaces || true
+ /usr/local/bin/openshift-install destroy cluster --dir=openshift/$CLUSTER_SUFFIX || true
+ """
+ }
+ }
+}
+
+pipeline {
+ environment {
+ DB_TAG = sh(script: "[[ \$IMAGE_MYSQL ]] && echo \$IMAGE_MYSQL | awk -F':' '{print \$2}' || echo main", returnStdout: true).trim()
+ }
+ parameters {
+ choice(name: 'TEST_SUITE', choices: ['run-release.csv', 'run-distro.csv'], description: 'Choose test suite from file (e2e-tests/run-*), used only if TEST_LIST not specified.')
+ text(name: 'TEST_LIST', defaultValue: '', description: 'List of tests to run separated by new line')
+ choice(name: 'IGNORE_PREVIOUS_RUN', choices: 'NO\nYES', description: 'Ignore passed tests in previous run (run all)')
+ choice(name: 'PILLAR_VERSION', choices: 'none\n80', description: 'Implies release run.')
+ string(name: 'GIT_BRANCH', defaultValue: 'main', description: 'Tag/Branch for percona/percona-server-mysql-operator repository')
+ string(name: 'PLATFORM_VER', defaultValue: 'latest', description: 'OpenShift kubernetes version. If set to min or max, value will be automatically taken from release_versions file.')
+ choice(name: 'CLUSTER_WIDE', choices: 'YES\nNO', description: 'Run tests in cluster wide mode')
+ string(name: 'IMAGE_OPERATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main')
+ string(name: 'IMAGE_MYSQL', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-psmysql')
+ string(name: 'IMAGE_BACKUP', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-backup')
+ string(name: 'IMAGE_ROUTER', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-router')
+ string(name: 'IMAGE_HAPROXY', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-haproxy')
+ string(name: 'IMAGE_ORCHESTRATOR', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-orchestrator')
+ string(name: 'IMAGE_TOOLKIT', defaultValue: '', description: 'ex: perconalab/percona-server-mysql-operator:main-toolkit')
+ string(name: 'IMAGE_PMM_CLIENT', defaultValue: '', description: 'ex: perconalab/pmm-client:dev-latest')
+ string(name: 'IMAGE_PMM_SERVER', defaultValue: '', description: 'ex: perconalab/pmm-server:dev-latest')
+ }
+ agent {
+ label 'docker'
+ }
+ options {
+ buildDiscarder(logRotator(daysToKeepStr: '-1', artifactDaysToKeepStr: '-1', numToKeepStr: '30', artifactNumToKeepStr: '30'))
+ skipDefaultCheckout()
+ disableConcurrentBuilds()
+ copyArtifactPermission('weekly-pso');
+ }
+ stages {
+ stage('Prepare Node') {
+ steps {
+ script { deleteDir() }
+ prepareSources()
+ initParams()
+ prepareAgent()
+ }
+ }
+ stage('Docker Build and Push') {
+ steps {
+ dockerBuildPush()
+ }
+ }
+ stage('Init Tests') {
+ steps {
+ initTests()
+ }
+ }
+ stage('Run Tests') {
+ options {
+ timeout(time: 3, unit: 'HOURS')
+ }
+ parallel {
+ stage('cluster1') {
+ agent { label 'docker' }
+ steps {
+ prepareAgent()
+ unstash "sourceFILES"
+ clusterRunner('c1')
+ }
+ }
+ stage('cluster2') {
+ agent { label 'docker' }
+ steps {
+ prepareAgent()
+ unstash "sourceFILES"
+ clusterRunner('c2')
+ }
+ }
+ stage('cluster3') {
+ agent { label 'docker' }
+ steps {
+ prepareAgent()
+ unstash "sourceFILES"
+ clusterRunner('c3')
+ }
+ }
+ stage('cluster4') {
+ agent { label 'docker' }
+ steps {
+ prepareAgent()
+ unstash "sourceFILES"
+ clusterRunner('c4')
+ }
+ }
+ }
+ }
+ }
+ post {
+ always {
+ echo "CLUSTER ASSIGNMENTS\n" + tests.toString().replace("], ","]\n").replace("]]","]").replaceFirst("\\[","")
+ makeReport()
+ step([$class: 'JUnitResultArchiver', testResults: '*.xml', healthScaleFactor: 1.0])
+ archiveArtifacts '*.xml,*.txt'
+
+ script {
+ if (currentBuild.result != null && currentBuild.result != 'SUCCESS') {
+ slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[$JOB_NAME]: build $currentBuild.result, $BUILD_URL"
+ }
+
+ clusters.each { shutdownCluster(it) }
+ }
+ }
+ }
+}
diff --git a/cloud/jenkins/ps-operator-latest-scheduler.yml b/cloud/jenkins/weekly-pso.yml
similarity index 77%
rename from cloud/jenkins/ps-operator-latest-scheduler.yml
rename to cloud/jenkins/weekly-pso.yml
index e13de9639d..8715ce89f4 100644
--- a/cloud/jenkins/ps-operator-latest-scheduler.yml
+++ b/cloud/jenkins/weekly-pso.yml
@@ -1,5 +1,5 @@
- job:
- name: ps-operator-latest-scheduler
+ name: weekly-pso
project-type: pipeline
description: |
Do not edit this job through the web!
@@ -13,5 +13,4 @@
- 'master'
wipe-workspace: false
lightweight-checkout: true
- script-path: cloud/jenkins/ps-operator-latest-scheduler.groovy
-
+ script-path: cloud/jenkins/weekly_pso.groovy
diff --git a/cloud/jenkins/weekly_pso.groovy b/cloud/jenkins/weekly_pso.groovy
new file mode 100644
index 0000000000..dce5d80376
--- /dev/null
+++ b/cloud/jenkins/weekly_pso.groovy
@@ -0,0 +1,47 @@
+void triggerJobMultiple(String jobName) {
+ for (int i = 1; i <= 3; i++) {
+ build job: "$jobName", propagate: false, wait: true
+ }
+}
+
+pipeline {
+ agent any
+ options {
+ skipDefaultCheckout()
+ disableConcurrentBuilds()
+ buildDiscarder(logRotator(numToKeepStr: '10', artifactNumToKeepStr: '10'))
+ }
+ triggers {
+ cron('0 8 * * 0')
+ }
+ stages {
+ stage("Run parallel") {
+ parallel {
+ stage('Trigger pso-gke job 3 times') {
+ steps {
+ triggerJobMultiple("pso-gke")
+ }
+ }
+ stage('Trigger pso-eks job 3 times') {
+ steps {
+ triggerJobMultiple("pso-eks")
+ }
+ }
+ stage('Trigger pso-os job 3 times') {
+ steps {
+ triggerJobMultiple("pso-os")
+ }
+ }
+ }
+ }
+ }
+ post {
+ always {
+ copyArtifacts(projectName: 'pso-gke', selector: lastCompleted(), target: 'pso-gke')
+ copyArtifacts(projectName: 'pso-eks', selector: lastCompleted(), target: 'pso-eks')
+ copyArtifacts(projectName: 'pso-os', selector: lastCompleted(), target: 'pso-os')
+ archiveArtifacts '*/*.xml'
+ step([$class: 'JUnitResultArchiver', testResults: '*/*.xml', healthScaleFactor: 1.0])
+ }
+ }
+}