diff --git a/.github/workflows/build-k3s.yaml b/.github/workflows/build-k3s.yaml index 47ba18e247ee..fde93e00d108 100644 --- a/.github/workflows/build-k3s.yaml +++ b/.github/workflows/build-k3s.yaml @@ -3,6 +3,10 @@ name: Build K3s on: workflow_call: inputs: + arch: + type: string + description: 'Architecture to build' + default: 'ubuntu-latest' upload-repo: type: boolean required: false @@ -18,7 +22,7 @@ permissions: jobs: build: name: Build - runs-on: ubuntu-latest + runs-on: ${{ inputs.arch }} # defaults to ubuntu-latest, for arm64 use ubuntu-24.04-arm timeout-minutes: 20 steps: - name: Checkout K3s @@ -44,9 +48,15 @@ jobs: - name: "Save K3s image" if: inputs.upload-image == true run: docker image save rancher/k3s -o ./dist/artifacts/k3s-image.tar - - name: "Upload K3s binary" - if: inputs.upload-repo == false + - name: "Upload K3s Artifacts" + if: inputs.upload-repo == false && inputs.arch == 'ubuntu-latest' uses: actions/upload-artifact@v4 with: name: k3s + path: dist/artifacts/k3s* + - name: "Upload K3s arm64 Artifacts" + if: contains(inputs.arch, 'arm') + uses: actions/upload-artifact@v4 + with: + name: k3s-arm64 path: dist/artifacts/k3s* \ No newline at end of file diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index e0ee6c0a2e4d..51fb25cd680d 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -32,6 +32,11 @@ jobs: uses: ./.github/workflows/build-k3s.yaml with: upload-image: true + build-arm64: + uses: ./.github/workflows/build-k3s.yaml + with: + arch: ubuntu-24.04-arm + upload-image: true e2e: name: "E2E Tests" needs: build @@ -100,40 +105,15 @@ jobs: files: tests/e2e/${{ matrix.etest }}/coverage.out flags: e2etests # optional verbose: true # optional (default = false) - docker: - needs: build - name: Docker Tests - runs-on: ubuntu-latest - timeout-minutes: 20 - strategy: - fail-fast: false - matrix: - dtest: [basics, bootstraptoken, cacerts, compat, lazypull, upgrade] - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: "Download k3s image" - uses: actions/download-artifact@v4 - with: - name: k3s - path: ./dist/artifacts - - name: Load k3s image - run: docker image load -i ./dist/artifacts/k3s-image.tar - - name: Run ${{ matrix.dtest }} Test - run: | - chmod +x ./dist/artifacts/k3s - . ./scripts/version.sh - . ./tests/docker/test-helpers - . ./tests/docker/test-run-${{ matrix.dtest }} - echo "Did test-run-${{ matrix.dtest }} pass $?" build-go-tests: name: "Build Go Tests" - runs-on: ubuntu-latest + strategy: + matrix: + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} outputs: - branch_name: ${{ steps.branch_step.outputs.BRANCH_NAME }} + channel: ${{ steps.channel_step.outputs.channel }} steps: - name: Checkout uses: actions/checkout@v4 @@ -146,69 +126,66 @@ jobs: - name: Upload Go Tests uses: actions/upload-artifact@v4 with: - name: docker-go-tests + name: docker-go-tests-${{ matrix.arch }} path: ./dist/artifacts/*.test compression-level: 9 retention-days: 1 - # For upgrade and skew tests, we need to know the branch name this run is based off. - # Since this is predetermined, we can run this step before the docker-go job, saving time. - # For PRs we can use the base_ref (ie the target branch of the PR). - # For pushes to k3s-io/k3s, the branch_name is a valid ref, master or release-x.y. - # For pushes to a fork, we need to determine the branch name by finding the parent branch from git show-branch history. 
- - name: Determine branch name - id: branch_step + # For upgrade and skew tests, we need to know the channel this run is based off. + # Since this is predetermined, we can run this step before the actual test job, saving time. + - name: Determine channel + id: channel_step run: | - if [ ${{ github.repository }} = "k3s-io/k3s" ]; then - BRANCH_NAME=$(echo ${{ github.base_ref || github.ref_name }}) - elif [ -z "${{ github.base_ref }}" ]; then - # We are in a fork, and need some git history to determine the branch name - # For some reason, the first fetch doesn't always get the full history, so we sleep and fetch again - git fetch origin --depth=100 +refs/heads/*:refs/remotes/origin/* - sleep 5 - git fetch origin --depth=100 +refs/heads/*:refs/remotes/origin/* - BRANCH_NAME=$(git show-branch -a 2> /dev/null | grep '\*' | grep -v `git rev-parse --abbrev-ref HEAD` | head -n1 | sed 's/.*\[\(.*\/\)\(.*\)\].*/\2/' | sed 's/[\^~].*//') - else - BRANCH_NAME=${{ github.base_ref }} - fi - echo "Branch Name is $BRANCH_NAME" - echo "BRANCH_NAME=$BRANCH_NAME" >> $GITHUB_OUTPUT - # branch name should be either master or release-1.XX - - name: Fail if branch name does not match pattern + . ./scripts/version.sh + MINOR_VER=$(echo $VERSION_TAG | cut -d'.' -f1,2) + echo "CHANNEL=$MINOR_VER" >> $GITHUB_OUTPUT + # channel name should be v1.XX or latest + - name: Fail if channel name does not match pattern run: | - if [[ ! ${{ steps.branch_step.outputs.branch_name }} =~ ^(master|release-[0-9]+\.[0-9]+)$ ]]; then - echo "Branch name ${{ steps.branch_step.outputs.branch_name }} does not match pattern" - echo "If this is a PR/fork, ensure you have recently rebased off master/release-1.XX branch" + if [[ ! ${{ steps.channel_step.outputs.channel }} =~ ^v1\.[0-9]+$|latest$ ]]; then + echo "Channel name ${{ steps.channel_step.outputs.channel }} does not match pattern" exit 1 fi - + docker-go: - needs: [build, build-go-tests] - name: Docker Tests In GO - runs-on: ubuntu-latest - timeout-minutes: 20 + needs: [build, build-arm64, build-go-tests] + name: Docker + timeout-minutes: 30 strategy: fail-fast: false matrix: - dtest: [basics, bootstraptoken, cacerts, etcd, lazypull, skew, upgrade] + dtest: [basics, bootstraptoken, cacerts, etcd, lazypull, skew, snapshotrestore, upgrade] + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }} env: - BRANCH_NAME: ${{ needs.build-go-tests.outputs.branch_name }} + CHANNEL: ${{ needs.build-go-tests.outputs.channel }} steps: - name: Checkout uses: actions/checkout@v4 - - name: "Download K3s image" + - name: "Download K3s image (amd64)" + if: ${{ matrix.arch == 'amd64' }} uses: actions/download-artifact@v4 with: name: k3s path: ./dist/artifacts + - name: "Download K3s image (arm64)" + if: ${{ matrix.arch == 'arm64' }} + uses: actions/download-artifact@v4 + with: + name: k3s-arm64 + path: ./dist/artifacts - name: Load and set K3s image run: | + if [ ${{ matrix.arch }} = "arm64" ]; then + mv ./dist/artifacts/k3s-arm64 ./dist/artifacts/k3s + fi + chmod +x ./dist/artifacts/k3s docker image load -i ./dist/artifacts/k3s-image.tar IMAGE_TAG=$(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep 'rancher/k3s') echo "K3S_IMAGE=$IMAGE_TAG" >> $GITHUB_ENV - name: Download Go Tests uses: actions/download-artifact@v4 with: - name: docker-go-tests + name: docker-go-tests-${{ matrix.arch }} path: ./dist/artifacts - name: Run ${{ matrix.dtest }} Test # Put the compied test binary back in the same place as the test source @@ -217,7 +194,9 @@ jobs: 
mv ./dist/artifacts/${{ matrix.dtest }}.test ./tests/docker/${{ matrix.dtest }}/ cd ./tests/docker/${{ matrix.dtest }} if [ ${{ matrix.dtest }} = "upgrade" ] || [ ${{ matrix.dtest }} = "skew" ]; then - ./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE -branch=$BRANCH_NAME + ./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE -channel=$CHANNEL + elif [ ${{ matrix.dtest }} = "snapshotrestore" ]; then + ./${{ matrix.dtest }}.test -ci else ./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE fi \ No newline at end of file diff --git a/Dockerfile.test b/Dockerfile.test index b4d801414a51..ff24f7c7515c 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -36,15 +36,23 @@ ENTRYPOINT ["./scripts/entry.sh"] CMD ["test"] -FROM vagrantlibvirt/vagrant-libvirt:0.12.1 AS test-e2e -RUN apt-get update && apt-get install -y docker.io +FROM vagrantlibvirt/vagrant-libvirt:sha-a94ce0d AS test-e2e +RUN apt-get update && apt-get install -y docker.io wget + ENV VAGRANT_DISABLE_STRICT_DEPENDENCY_ENFORCEMENT=1 RUN vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp -RUN vagrant box add bento/ubuntu-24.04 --provider libvirt --force + +# Workaround for older vagrant-libvirt image and new vagrant infra websites +# See https://github.com/hashicorp/vagrant/issues/13571 and +# https://github.com/vagrant-libvirt/vagrant-libvirt/issues/1840 +RUN wget https://app.vagrantup.com/bento/boxes/ubuntu-24.04/versions/202404.26.0/providers/libvirt.box -O bento-ubuntu24.04-202404.26.0.box +RUN vagrant box add bento/ubuntu-24.04 bento-ubuntu24.04-202404.26.0.box +RUN cd /.vagrant.d/boxes/bento-VAGRANTSLASH-ubuntu-24.04/ && mv 0 202404.26.0 && echo -n "https://app.vagrantup.com/bento/boxes/ubuntu-24.04" > metadata_url + RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \ chmod +x ./kubectl; \ mv ./kubectl /usr/local/bin/kubectl -RUN GO_VERSION=go1.21.11; \ +RUN GO_VERSION=go1.22.5; \ curl -O -L "https://golang.org/dl/${GO_VERSION}.linux-amd64.tar.gz"; \ rm -rf /usr/local/go; \ tar -C /usr/local -xzf ${GO_VERSION}.linux-amd64.tar.gz; diff --git a/scripts/test b/scripts/test index c1a2ed22ffba..f5585690d373 100755 --- a/scripts/test +++ b/scripts/test @@ -19,41 +19,41 @@ mkdir -p $artifacts docker ps +export K3S_IMAGE="rancher/k3s:${VERSION_TAG}${SUFFIX}" + # --- -# Only run basic tests on non amd64 archs, we use GitHub Actions for amd64 -if [ "$ARCH" != 'amd64' ]; then +# Only run PR tests on arm arch, we use GitHub Actions for amd64 and arm64 +# Run all tests on tag events, as we want test failures to block the release +if [ "$ARCH" == 'arm' ] || [ "$DRONE_BUILD_EVENT" = 'tag' ]; then - export K3S_IMAGE="rancher/k3s:${VERSION_TAG}${SUFFIX}" go test ./tests/docker/basics/basics_test.go -k3sImage="$K3S_IMAGE" echo "Did go test basics $?" - . ./tests/docker/test-run-basics - echo "Did test-run-basics $?" + # Extract v1.XX minor version for skew and upgrade tests + minor_version=$(echo $VERSION_K8S | cut -d '.' -f1,2) - . ./tests/docker/test-run-cacerts - echo "Did test-run-cacerts $?" + go test ./tests/docker/cacerts/cacerts_test.go -k3sImage="$K3S_IMAGE" + echo "Did go test cacerts $?" - . ./tests/docker/test-run-compat - echo "Did test-run-compat $?" + go test ./tests/docker/skew/skew_test.go -k3sImage="$K3S_IMAGE" -channel="$minor_version" + echo "Did go test skew $?" - . ./tests/docker/test-run-bootstraptoken - echo "Did test-run-bootstraptoken $?"
+ go test ./tests/docker/bootstraptoken/bootstraptoken_test.go -k3sImage="$K3S_IMAGE" + echo "Did go test bootstraptoken $?" - . ./tests/docker/test-run-upgrade - echo "Did test-run-upgrade $?" + go test ./tests/docker/upgrade/upgrade_test.go -k3sImage="$K3S_IMAGE" -channel="$minor_version" + echo "Did go test upgrade $?" + + go test ./tests/docker/lazypull/lazypull_test.go -k3sImage="$K3S_IMAGE" + echo "Did go test lazypull $?" - . ./tests/docker/test-run-lazypull - echo "Did test-run-lazypull $?" fi - +#TODO convert this to new go test framework . ./tests/docker/test-run-hardened echo "Did test-run-hardened $?" -. ./tests/docker/test-run-etcd -echo "Did test-run-etcd $?" - # --- [ "$ARCH" != 'amd64' ] && \ @@ -71,10 +71,10 @@ fi # --- if [ "$DRONE_BUILD_EVENT" = 'cron' ]; then - E2E_OUTPUT=$artifacts test-run-sonobuoy serial - echo "Did test-run-sonobuoy serial $?" - test-run-sonobuoy etcd serial - echo "Did test-run-sonobuoy-etcd serial $?" + run-go-test ./test/docker/conformance/conformance_test.go -k3sImage="$K3S_IMAGE" -db sqlite -serial -ginkgo.v + echo "Did go conformance sqlite serial $?" + run-go-test ./test/docker/conformance/conformance_test.go -k3sImage="$K3S_IMAGE" -db etcd -serial -ginkgo.v + echo "Did go conformance etcd serial $?" test-run-sonobuoy mysql serial echo "Did test-run-sonobuoy-mysqk serial $?" test-run-sonobuoy postgres serial @@ -91,8 +91,10 @@ if [ "$DRONE_BUILD_EVENT" = 'cron' ]; then E2E_OUTPUT=$artifacts test-run-sonobuoy parallel echo "Did test-run-sonobuoy parallel $?" - test-run-sonobuoy etcd parallel - echo "Did test-run-sonobuoy-etcd parallel $?" + run-go-test ./test/docker/conformance/conformance_test.go -k3sImage="$K3S_IMAGE" -db sqlite -ginkgo.v + echo "Did go conformance sqlite parallel $?" + run-go-test ./test/docker/conformance/conformance_test.go -k3sImage="$K3S_IMAGE" -db etcd -ginkgo.v + echo "Did go test conformance etcd parallel $?" test-run-sonobuoy mysql parallel echo "Did test-run-sonobuoy-mysql parallel $?" test-run-sonobuoy postgres parallel diff --git a/tests/client.go b/tests/client.go new file mode 100644 index 000000000000..7438bc5849d7 --- /dev/null +++ b/tests/client.go @@ -0,0 +1,149 @@ +package tests + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/set" +) + +// This file consolidates functions that are used across multiple testing frameworks. +// Most of it relates to interacting with the Kubernetes API and checking the status of resources. 
+ +// CheckDefaultDeployments checks if the standard array of K3s deployments are ready, otherwise returns an error +func CheckDefaultDeployments(kubeconfigFile string) error { + return CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, kubeconfigFile) +} + +// CheckDeployments checks if the provided list of deployments are ready, otherwise returns an error +func CheckDeployments(deployments []string, kubeconfigFile string) error { + + deploymentSet := make(map[string]bool) + for _, d := range deployments { + deploymentSet[d] = false + } + + client, err := K8sClient(kubeconfigFile) + if err != nil { + return err + } + deploymentList, err := client.AppsV1().Deployments("").List(context.Background(), metav1.ListOptions{}) + if err != nil { + return err + } + for _, deployment := range deploymentList.Items { + if _, ok := deploymentSet[deployment.Name]; ok && deployment.Status.ReadyReplicas == deployment.Status.Replicas { + deploymentSet[deployment.Name] = true + } + } + for d, found := range deploymentSet { + if !found { + return fmt.Errorf("failed to deploy %s", d) + } + } + + return nil +} + +func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) { + clientSet, err := K8sClient(kubeconfigFile) + if err != nil { + return nil, err + } + nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + return nodes.Items, nil +} + +func ParsePods(kubeconfigFile string) ([]corev1.Pod, error) { + clientSet, err := K8sClient(kubeconfigFile) + if err != nil { + return nil, err + } + pods, err := clientSet.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + return pods.Items, nil +} + +// AllPodsUp checks if pods on the cluster are Running or Succeeded, otherwise returns an error +func AllPodsUp(kubeconfigFile string) error { + clientSet, err := K8sClient(kubeconfigFile) + if err != nil { + return err + } + pods, err := clientSet.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{}) + if err != nil { + return err + } + for _, pod := range pods.Items { + // Check if the pod is running + if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded { + return fmt.Errorf("pod %s is %s", pod.Name, pod.Status.Phase) + } + } + return nil +} + +// PodReady checks if a pod is ready by querying its status +func PodReady(podName, namespace, kubeconfigFile string) (bool, error) { + clientSet, err := K8sClient(kubeconfigFile) + if err != nil { + return false, err + } + pod, err := clientSet.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("failed to get pod: %v", err) + } + // Check if the pod is running + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.Name == podName && containerStatus.Ready { + return true, nil + } + } + return false, nil +} + +// Checks if provided nodes are ready, otherwise returns an error +func NodesReady(kubeconfigFile string, nodeNames []string) error { + nodes, err := ParseNodes(kubeconfigFile) + if err != nil { + return err + } + nodesToCheck := set.New(nodeNames...) 
+ readyNodes := make(set.Set[string], 0) + for _, node := range nodes { + for _, condition := range node.Status.Conditions { + if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue { + return fmt.Errorf("node %s is not ready", node.Name) + } + readyNodes.Insert(node.Name) + } + } + // Check if all nodes are ready + if !nodesToCheck.Equal(readyNodes) { + return fmt.Errorf("expected nodes %v, found %v", nodesToCheck, readyNodes) + } + return nil +} + +func K8sClient(kubeconfigFile string) (*kubernetes.Clientset, error) { + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile) + if err != nil { + return nil, err + } + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return clientSet, nil +} diff --git a/tests/docker/basics/basics_test.go b/tests/docker/basics/basics_test.go index 9349847def59..390b598a6ca1 100644 --- a/tests/docker/basics/basics_test.go +++ b/tests/docker/basics/basics_test.go @@ -7,12 +7,13 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers") +var k3sImage = flag.String("k3sImage", "", "The image used to provision containers") var config *tester.TestConfig func Test_DockerBasic(t *testing.T) { @@ -31,26 +32,22 @@ var _ = Describe("Basic Tests", Ordered, func() { Expect(config.ProvisionServers(1)).To(Succeed()) Expect(config.ProvisionAgents(1)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) Eventually(func() error { - return tester.NodesReady(config.KubeconfigFile) + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) }, "40s", "5s").Should(Succeed()) }) }) Context("Use Local Storage Volume", func() { It("should apply local storage volume", func() { - const volumeTestManifest = "../resources/volume-test.yaml" - - // Apply the manifest - cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile) - _, err := tester.RunCommand(cmd) + _, err := config.DeployWorkload("volume-test.yaml") Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest") }) It("should validate local storage volume", func() { Eventually(func() (bool, error) { - return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile) + return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile) }, "20s", "5s").Should(BeTrue()) }) }) @@ -58,9 +55,9 @@ var _ = Describe("Basic Tests", Ordered, func() { Context("Verify Binaries and Images", func() { It("has valid bundled binaries", func() { for _, server := range config.Servers { - Expect(tester.VerifyValidVersion(server.Name, "kubectl")).To(Succeed()) - Expect(tester.VerifyValidVersion(server.Name, "ctr")).To(Succeed()) - Expect(tester.VerifyValidVersion(server.Name, "crictl")).To(Succeed()) + Expect(tester.VerifyValidVersion(server, "kubectl")).To(Succeed()) + Expect(tester.VerifyValidVersion(server, "ctr")).To(Succeed()) + Expect(tester.VerifyValidVersion(server, "crictl")).To(Succeed()) } }) It("has valid airgap images", func() { diff --git a/tests/docker/bootstraptoken/bootstraptoken_test.go 
b/tests/docker/bootstraptoken/bootstraptoken_test.go index e61f08baca11..66f92a4a5344 100644 --- a/tests/docker/bootstraptoken/bootstraptoken_test.go +++ b/tests/docker/bootstraptoken/bootstraptoken_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -28,7 +29,7 @@ var _ = Describe("Boostrap Token Tests", Ordered, func() { Expect(err).NotTo(HaveOccurred()) Expect(config.ProvisionServers(1)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) }) }) @@ -37,19 +38,19 @@ var _ = Describe("Boostrap Token Tests", Ordered, func() { var newSecret string It("creates a bootstrap token", func() { var err error - newSecret, err = tester.RunCmdOnDocker(config.Servers[0].Name, "k3s token create --ttl=5m --description=Test") + newSecret, err = config.Servers[0].RunCmdOnNode("k3s token create --ttl=5m --description=Test") Expect(err).NotTo(HaveOccurred()) Expect(newSecret).NotTo(BeEmpty()) }) It("joins the agent with the new tokens", func() { newSecret = strings.ReplaceAll(newSecret, "\n", "") - config.Secret = newSecret + config.Token = newSecret Expect(config.ProvisionAgents(1)).To(Succeed()) Eventually(func(g Gomega) { - nodes, err := tester.ParseNodes(config.KubeconfigFile) + nodes, err := tests.ParseNodes(config.KubeconfigFile) g.Expect(err).NotTo(HaveOccurred()) g.Expect(nodes).To(HaveLen(2)) - g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed()) + g.Expect(tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())).To(Succeed()) }, "40s", "5s").Should(Succeed()) }) }) diff --git a/tests/docker/cacerts/cacerts_test.go b/tests/docker/cacerts/cacerts_test.go index 8bc49119135b..f7e85e301593 100644 --- a/tests/docker/cacerts/cacerts_test.go +++ b/tests/docker/cacerts/cacerts_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -62,7 +63,7 @@ var _ = Describe("CA Certs Tests", Ordered, func() { Expect(config.ProvisionServers(1)).To(Succeed()) Expect(config.ProvisionAgents(1)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) }) }) diff --git a/tests/docker/conformance/conformance_test.go b/tests/docker/conformance/conformance_test.go new file mode 100644 index 000000000000..b6a5bb24353c --- /dev/null +++ b/tests/docker/conformance/conformance_test.go @@ -0,0 +1,141 @@ +package main + +import ( + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/k3s-io/k3s/tests" + tester "github.com/k3s-io/k3s/tests/docker" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers") +var db = flag.String("db", "", "The database to use for the tests (sqlite, etcd, mysql, postgres)") +var serial = flag.Bool("serial", false, "Run the Serial Conformance Tests") +var config *tester.TestConfig + +func Test_DockerConformance(t *testing.T) { + flag.Parse() + RegisterFailHandler(Fail) + RunSpecs(t, "Conformance Docker Test Suite") +} + +var _ = Describe("Conformance Tests", Ordered, func() { + + Context("Setup Cluster", func() { + It("should provision servers and agents", func() { + var err error + config, err = tester.NewTestConfig(*k3sImage) + Expect(err).NotTo(HaveOccurred()) + config.DBType = *db + Expect(config.ProvisionServers(1)).To(Succeed()) + Expect(config.ProvisionAgents(1)).To(Succeed()) + Eventually(func() error { + return tests.CheckDefaultDeployments(config.KubeconfigFile) + }, "90s", "5s").Should(Succeed()) + Eventually(func() error { + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) + }, "40s", "5s").Should(Succeed()) + }) + }) + Context("Run Hydrophone Conformance tests", func() { + It("should download hydrophone", func() { + hydrophoneVersion := "v0.6.0" + hydrophoneArch := runtime.GOARCH + if hydrophoneArch == "amd64" { + hydrophoneArch = "x86_64" + } + hydrophoneURL := fmt.Sprintf("https://github.com/kubernetes-sigs/hydrophone/releases/download/%s/hydrophone_Linux_%s.tar.gz", + hydrophoneVersion, hydrophoneArch) + cmd := fmt.Sprintf("curl -L %s | tar -xzf - -C %s", hydrophoneURL, config.TestDir) + _, err := tester.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Chmod(filepath.Join(config.TestDir, "hydrophone"), 0755)).To(Succeed()) + }) + // Takes about 15min to run, so expect nothing to happen for a while + It("should run parallel conformance tests", func() { + if *serial { + Skip("Skipping parallel conformance tests") + } + cmd := fmt.Sprintf("%s --focus=\"Conformance\" --skip=\"Serial|Flaky\" -v 2 -p %d --kubeconfig %s", + filepath.Join(config.TestDir, "hydrophone"), + runtime.NumCPU()/2, + config.KubeconfigFile) + By("Hydrophone: " + cmd) + hc, err := StartCmd(cmd) + Expect(err).NotTo(HaveOccurred()) + // Periodically check the number of tests that have run, since the hydrophone output does not support a progress status + // Taken from https://github.com/kubernetes-sigs/hydrophone/issues/223#issuecomment-2547174722 + go func() { + cmd := fmt.Sprintf("kubectl exec -n=conformance e2e-conformance-test -c output-container --kubeconfig=%s -- cat /tmp/results/e2e.log | grep -o \"•\" | wc -l", + config.KubeconfigFile) + for i := 1; ; i++ { + time.Sleep(120 * time.Second) + if hc.ProcessState != nil { + break + } + res, _ := tester.RunCommand(cmd) + res = strings.TrimSpace(res) + fmt.Printf("Status Report %d: %s tests complete\n", i, res) + } + }() + Expect(hc.Wait()).To(Succeed()) + }) + It("should run serial conformance tests", func() { + if !*serial { + Skip("Skipping serial conformance tests") + } + cmd := fmt.Sprintf("%s --focus=\"Serial\" --skip=\"Flaky\" -v 2 --kubeconfig %s", + filepath.Join(config.TestDir, "hydrophone"), + config.KubeconfigFile) + By("Hydrophone: " + cmd) + hc, err := StartCmd(cmd) + Expect(err).NotTo(HaveOccurred()) + go func() { + cmd := fmt.Sprintf("kubectl exec -n=conformance e2e-conformance-test -c output-container --kubeconfig=%s -- cat /tmp/results/e2e.log | grep -o \"•\" | wc -l", + config.KubeconfigFile) + for i := 1; ; i++ { + time.Sleep(120 * time.Second) + if 
hc.ProcessState != nil { + break + } + res, _ := tester.RunCommand(cmd) + res = strings.TrimSpace(res) + fmt.Printf("Status Report %d: %s tests complete\n", i, res) + } + }() + Expect(hc.Wait()).To(Succeed()) + }) + }) +}) + +var failed bool +var _ = AfterEach(func() { + failed = failed || CurrentSpecReport().Failed() +}) + +var _ = AfterSuite(func() { + if config != nil && !failed { + config.Cleanup() + } +}) + +// StartCmd starts a command and pipes its output to +// the ginkgo Writer, with the expectation to poll the progress of the command +func StartCmd(cmd string) (*exec.Cmd, error) { + c := exec.Command("sh", "-c", cmd) + c.Stdout = GinkgoWriter + c.Stderr = GinkgoWriter + if err := c.Start(); err != nil { + return c, err + } + return c, nil +} diff --git a/tests/docker/etcd/etcd_test.go b/tests/docker/etcd/etcd_test.go index 948f53f7da2f..9826e5402c27 100644 --- a/tests/docker/etcd/etcd_test.go +++ b/tests/docker/etcd/etcd_test.go @@ -5,6 +5,7 @@ import ( "flag" "os" "testing" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -30,11 +31,10 @@ var _ = Describe("Etcd Tests", Ordered, func() { It("should provision servers", func() { Expect(config.ProvisionServers(3)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) - Eventually(func(g Gomega) { - g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3)) - g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed()) + Eventually(func() error { + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) }, "60s", "5s").Should(Succeed()) }) It("should destroy the cluster", func() { @@ -57,12 +57,11 @@ var _ = Describe("Etcd Tests", Ordered, func() { Expect(config.ProvisionServers(5)).To(Succeed()) Expect(config.ProvisionAgents(1)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + }, "90s", "5s").Should(Succeed()) + Eventually(func() error { + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) }, "90s", "5s").Should(Succeed()) - Eventually(func(g Gomega) { - g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(6)) - g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed()) - }, "60s", "5s").Should(Succeed()) }) }) }) diff --git a/tests/docker/lazypull/lazypull_test.go b/tests/docker/lazypull/lazypull_test.go index 796f26efcf62..686f56bbb582 100644 --- a/tests/docker/lazypull/lazypull_test.go +++ b/tests/docker/lazypull/lazypull_test.go @@ -3,10 +3,10 @@ package main import ( "flag" "fmt" - "os" "strings" "testing" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -28,30 +28,25 @@ var _ = Describe("LazyPull Tests", Ordered, func() { var err error config, err = tester.NewTestConfig(*k3sImage) Expect(err).NotTo(HaveOccurred()) - - Expect(os.Setenv("SERVER_ARGS", "--snapshotter=stargz")).To(Succeed()) + config.ServerYaml = "snapshotter: stargz" Expect(config.ProvisionServers(1)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) - }, "60s", "5s").Should(Succeed()) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + }, "90s", "5s").Should(Succeed()) Eventually(func() error { - return tester.NodesReady(config.KubeconfigFile) + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) }, "40s", "5s").Should(Succeed()) }) }) Context("Use Snapshot Container", func() { It("should apply local storage volume", func() { - const snapshotTestManifest = "../resources/snapshot-test.yaml" - - // Apply the manifest - cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", snapshotTestManifest, config.KubeconfigFile) - _, err := tester.RunCommand(cmd) + _, err := config.DeployWorkload("snapshot-test.yaml") Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest") }) It("should have the pod come up", func() { Eventually(func() (bool, error) { - return tester.PodReady("stargz-snapshot-test", "default", config.KubeconfigFile) + return tests.PodReady("stargz-snapshot-test", "default", config.KubeconfigFile) }, "30s", "5s").Should(BeTrue()) }) var topLayer string @@ -86,7 +81,7 @@ func lookLayers(node, layer string) error { layersNum := 0 var err error for layersNum = 0; layersNum < 100; layersNum++ { - // We use RunCommand instead of RunCmdOnDocker because we pipe the output to jq + // We use RunCommand instead of RunCmdOnNode because we pipe the output to jq cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshot --snapshotter=stargz info %s | jq -r '.Parent'", node, layer) layer, err = tester.RunCommand(cmd) if err != nil { @@ -121,7 +116,10 @@ func lookLayers(node, layer string) error { func getTopmostLayer(node, container string) (string, error) { var targetContainer string cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c ls -q labels.\"io.kubernetes.container.name\"==\"%s\" | sed -n 1p", node, container) - targetContainer, _ = tester.RunCommand(cmd) + targetContainer, err := tester.RunCommand(cmd) + if err != nil { + return "", fmt.Errorf("failed to get target container: %v", err) + } targetContainer = strings.TrimSpace(targetContainer) fmt.Println("targetContainer: ", targetContainer) if targetContainer == "" { diff --git a/tests/docker/resources/clusterip.yaml b/tests/docker/resources/clusterip.yaml new file mode 100644 index 000000000000..e972f32d19f1 --- /dev/null +++ b/tests/docker/resources/clusterip.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-clusterip +spec: + selector: + matchLabels: + k8s-app: nginx-app-clusterip + replicas: 2 + template: + metadata: + labels: + k8s-app: nginx-app-clusterip + spec: + containers: + - name: nginx + image: ranchertest/mytestcontainer + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: nginx-app-clusterip + name: nginx-clusterip-svc + namespace: default +spec: + type: ClusterIP + ports: + - port: 80 + selector: + k8s-app: nginx-app-clusterip diff --git 
a/tests/docker/resources/nodeport.yaml b/tests/docker/resources/nodeport.yaml new file mode 100644 index 000000000000..2187b732db89 --- /dev/null +++ b/tests/docker/resources/nodeport.yaml @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-nodeport +spec: + selector: + matchLabels: + k8s-app: nginx-app-nodeport + replicas: 2 + template: + metadata: + labels: + k8s-app: nginx-app-nodeport + spec: + containers: + - name: nginx + image: ranchertest/mytestcontainer + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: nginx-app-nodeport + name: nginx-nodeport-svc + namespace: default +spec: + type: NodePort + ports: + - port: 80 + nodePort: 30096 + name: http + selector: + k8s-app: nginx-app-nodeport diff --git a/tests/docker/skew/skew_test.go b/tests/docker/skew/skew_test.go index 1ffea750851f..2c40cc16b09f 100644 --- a/tests/docker/skew/skew_test.go +++ b/tests/docker/skew/skew_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/blang/semver/v4" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -15,12 +16,9 @@ import ( // Using these two flags, we upgrade from the latest release of to // the current commit build of K3s defined by var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s") -var branch = flag.String("branch", "master", "The release branch to test") +var channel = flag.String("channel", "latest", "The release channel to test") var config *tester.TestConfig -var numServers = 1 -var numAgents = 1 - func Test_DockerSkew(t *testing.T) { flag.Parse() RegisterFailHandler(Fail) @@ -33,14 +31,17 @@ var _ = BeforeSuite(func() { // For master and unreleased branches, we want the latest stable release var upgradeChannel string var err error - if *branch == "master" { + if *channel == "latest" || *channel == "v1.32" { + // disabled: AuthorizeNodeWithSelectors is now on by default, which breaks compat with agents < v1.32. + // This can be re-enabled once the previous branch is v1.32 or higher, or when RBAC changes have been backported. 
+ // ref: https://github.com/kubernetes/kubernetes/pull/128168 + Skip("Skipping version skew tests for " + *channel + " due to AuthorizeNodeWithSelectors") + upgradeChannel = "stable" } else { - upgradeChannel = strings.Replace(*branch, "release-", "v", 1) - // now that it is in v1.1 format, we want to substract one from the minor version - // to get the previous release - sV, err := semver.ParseTolerant(upgradeChannel) - Expect(err).NotTo(HaveOccurred(), "failed to parse version from "+upgradeChannel) + // We want to subtract one from the minor version to get the previous release + sV, err := semver.ParseTolerant(*channel) + Expect(err).NotTo(HaveOccurred(), "failed to parse version from "+*channel) sV.Minor-- upgradeChannel = fmt.Sprintf("v%d.%d", sV.Major, sV.Minor) } @@ -54,42 +55,40 @@ var _ = Describe("Skew Tests", Ordered, func() { Context("Setup Cluster with Server newer than Agent", func() { - It("should provision new servers and old agents", func() { + It("should provision new server and old agent", func() { var err error config, err = tester.NewTestConfig(*k3sImage) Expect(err).NotTo(HaveOccurred()) - Expect(config.ProvisionServers(numServers)).To(Succeed()) + Expect(config.ProvisionServers(1)).To(Succeed()) config.K3sImage = "rancher/k3s:" + lastMinorVersion - Expect(config.ProvisionAgents(numAgents)).To(Succeed()) + Expect(config.ProvisionAgents(1)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) }) It("should match respective versions", func() { for _, server := range config.Servers { - out, err := tester.RunCmdOnDocker(server.Name, "k3s --version") + out, err := server.RunCmdOnNode("k3s --version") Expect(err).NotTo(HaveOccurred()) // The k3s image is in the format rancher/k3s:v1.20.0-k3s1 cVersion := strings.Split(*k3sImage, ":")[1] cVersion = strings.Replace(cVersion, "-amd64", "", 1) + cVersion = strings.Replace(cVersion, "-arm64", "", 1) + cVersion = strings.Replace(cVersion, "-arm", "", 1) cVersion = strings.Replace(cVersion, "-", "+", 1) Expect(out).To(ContainSubstring(cVersion)) } for _, agent := range config.Agents { - Expect(tester.RunCmdOnDocker(agent.Name, "k3s --version")). + Expect(agent.RunCmdOnNode("k3s --version")). 
To(ContainSubstring(strings.Replace(lastMinorVersion, "-", "+", 1))) } }) It("should deploy a test pod", func() { - const volumeTestManifest = "../resources/volume-test.yaml" - - // Apply the manifest - cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile) - _, err := tester.RunCommand(cmd) + _, err := config.DeployWorkload("volume-test.yaml") Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest") Eventually(func() (bool, error) { - return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile) + return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile) }, "20s", "5s").Should(BeTrue()) }) It("should destroy the cluster", func() { @@ -103,27 +102,31 @@ var _ = Describe("Skew Tests", Ordered, func() { Expect(err).NotTo(HaveOccurred()) }) It("should provision servers", func() { + // Since we are provisioning the first server alone, we need to explicitly define the DB type + config.DBType = "etcd" Expect(config.ProvisionServers(1)).To(Succeed()) config.K3sImage = *k3sImage Expect(config.ProvisionServers(3)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) - }, "60s", "5s").Should(Succeed()) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + }, "90s", "5s").Should(Succeed()) Eventually(func(g Gomega) { - g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3)) - g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed()) + g.Expect(tests.ParseNodes(config.KubeconfigFile)).To(HaveLen(3)) + g.Expect(tests.NodesReady(config.KubeconfigFile, config.GetNodeNames())).To(Succeed()) }, "60s", "5s").Should(Succeed()) }) It("should match respective versions", func() { - out, err := tester.RunCmdOnDocker(config.Servers[0].Name, "k3s --version") + out, err := config.Servers[0].RunCmdOnNode("k3s --version") Expect(err).NotTo(HaveOccurred()) Expect(out).To(ContainSubstring(strings.Replace(lastMinorVersion, "-", "+", 1))) for _, server := range config.Servers[1:] { - out, err := tester.RunCmdOnDocker(server.Name, "k3s --version") + out, err := server.RunCmdOnNode("k3s --version") Expect(err).NotTo(HaveOccurred()) // The k3s image is in the format rancher/k3s:v1.20.0-k3s1-amd64 cVersion := strings.Split(*k3sImage, ":")[1] cVersion = strings.Replace(cVersion, "-amd64", "", 1) + cVersion = strings.Replace(cVersion, "-arm64", "", 1) + cVersion = strings.Replace(cVersion, "-arm", "", 1) cVersion = strings.Replace(cVersion, "-", "+", 1) Expect(out).To(ContainSubstring(cVersion)) } diff --git a/tests/docker/snapshotrestore/snapshotrestore_test.go b/tests/docker/snapshotrestore/snapshotrestore_test.go new file mode 100644 index 000000000000..ff3576bed13e --- /dev/null +++ b/tests/docker/snapshotrestore/snapshotrestore_test.go @@ -0,0 +1,210 @@ +package snapshotrestore + +import ( + "flag" + "fmt" + "strings" + "testing" + + "github.com/k3s-io/k3s/tests" + tester "github.com/k3s-io/k3s/tests/docker" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/utils/set" +) + +var k3sImage = flag.String("k3sImage", "rancher/systemd-node", "The image used to provision containers") +var serverCount = flag.Int("serverCount", 3, "number of server nodes") +var agentCount = flag.Int("agentCount", 1, "number of agent nodes") +var ci = flag.Bool("ci", false, "running on CI") +var config *tester.TestConfig +var snapshotname string + +func Test_DockerSnapshotRestore(t *testing.T) { + RegisterFailHandler(Fail) + flag.Parse() + suiteConfig, reporterConfig := GinkgoConfiguration() + RunSpecs(t, "SnapshotRestore Test Suite", suiteConfig, reporterConfig) +} + +var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { + Context("Setup Cluster", func() { + It("should provision servers and agents", func() { + var err error + config, err = tester.NewTestConfig(*k3sImage) + Expect(err).NotTo(HaveOccurred()) + Expect(config.ProvisionServers(*serverCount)).To(Succeed()) + Expect(config.ProvisionAgents(*agentCount)).To(Succeed()) + Eventually(func() error { + return tests.CheckDefaultDeployments(config.KubeconfigFile) + }, "60s", "5s").Should(Succeed()) + Eventually(func() error { + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) + }, "40s", "5s").Should(Succeed()) + }) + }) + Context("Cluster creates snapshots and workloads:", func() { + It("Verifies test workload before snapshot is created", func() { + res, err := config.DeployWorkload("clusterip.yaml") + Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res) + + Eventually(func(g Gomega) { + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + config.KubeconfigFile + res, err := tester.RunCommand(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: %q result: %s", cmd, res) + }, "240s", "5s").Should(Succeed()) + }) + + It("Verifies Snapshot is created", func() { + Eventually(func(g Gomega) { + _, err := config.Servers[0].RunCmdOnNode("k3s etcd-snapshot save") + g.Expect(err).NotTo(HaveOccurred()) + cmd := "ls /var/lib/rancher/k3s/server/db/snapshots/" + snapshotname, err = config.Servers[0].RunCmdOnNode(cmd) + g.Expect(err).NotTo(HaveOccurred()) + fmt.Println("Snapshot Name", snapshotname) + g.Expect(snapshotname).Should(ContainSubstring("on-demand-server-0")) + }, "240s", "10s").Should(Succeed()) + }) + + It("Verifies another test workload after snapshot is created", func() { + res, err := config.DeployWorkload("nodeport.yaml") + Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed: "+res) + Eventually(func(g Gomega) { + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + config.KubeconfigFile + res, err := tester.RunCommand(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") + }, "240s", "5s").Should(Succeed()) + }) + + }) + + Context("Cluster restores from snapshot", func() { + It("Restores the snapshot", func() { + //Stop k3s on all servers + for _, server := range config.Servers { + cmd := "systemctl stop k3s" + Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred()) + if server != config.Servers[0] { + cmd = "k3s-killall.sh" + Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred()) + } + } + //Restores from snapshot on server-0 + cmd := "k3s server --cluster-init --cluster-reset 
--cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname + res, err := config.Servers[0].RunCmdOnNode(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) + + cmd = "systemctl start k3s" + Expect(config.Servers[0].RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred()) + + }) + + It("Checks that other servers are not ready", func() { + By("Fetching node status") + var readyNodeNames []string + var notReadyNodeNames []string + Eventually(func(g Gomega) { + readyNodeNames = []string{config.Servers[0].Name} + for _, agent := range config.Agents { + readyNodeNames = append(readyNodeNames, agent.Name) + } + for _, server := range config.Servers[1:] { + notReadyNodeNames = append(notReadyNodeNames, server.Name) + } + g.Expect(CheckNodeStatus(config.KubeconfigFile, readyNodeNames, notReadyNodeNames)).To(Succeed()) + }, "240s", "5s").Should(Succeed()) + }) + + It("Rejoins other servers to cluster", func() { + // We must remove the db directory on the other servers before restarting k3s + // otherwise the nodes may join the old cluster + for _, server := range config.Servers[1:] { + cmd := "rm -rf /var/lib/rancher/k3s/server/db" + Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred()) + } + + for _, server := range config.Servers[1:] { + cmd := "systemctl start k3s" + Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred()) + } + }) + + It("Checks that all nodes and pods are ready", func() { + By("Fetching node status") + Eventually(func() error { + return tests.NodesReady(config.KubeconfigFile, config.GetNodeNames()) + }, "60s", "5s").Should(Succeed()) + + By("Fetching Pods status") + Eventually(func(g Gomega) { + pods, err := tests.ParsePods(config.KubeconfigFile) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(string(pod.Status.Phase)).Should(Equal("Succeeded"), pod.Name) + } else { + g.Expect(string(pod.Status.Phase)).Should(Equal("Running"), pod.Name) + } + } + }, "120s", "5s").Should(Succeed()) + }) + + It("Verifies that workload1 exists and workload2 does not", func() { + cmd := "kubectl get pods --kubeconfig=" + config.KubeconfigFile + res, err := tester.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(res).Should(ContainSubstring("test-clusterip")) + Expect(res).ShouldNot(ContainSubstring("test-nodeport")) + }) + }) +}) + +var failed bool +var _ = AfterEach(func() { + failed = failed || CurrentSpecReport().Failed() +}) + +var _ = AfterSuite(func() { + if *ci || (config != nil && !failed) { + Expect(config.Cleanup()).To(Succeed()) + } +}) + +// Checks if nodes match the expected status +// We use kubectl directly, because getting a NotReady node status from the API is not easy +func CheckNodeStatus(kubeconfigFile string, readyNodes, notReadyNodes []string) error { + readyNodesSet := set.New(readyNodes...) + notReadyNodesSet := set.New(notReadyNodes...) 
+ foundReadyNodes := make(set.Set[string], 0) + foundNotReadyNodes := make(set.Set[string], 0) + + cmd := "kubectl get nodes --no-headers --kubeconfig=" + kubeconfigFile + res, err := tester.RunCommand(cmd) + if err != nil { + return err + } + // extract the node status from the 2nd column of kubectl output + for _, line := range strings.Split(res, "\n") { + if strings.Contains(line, "k3s-test") { + // Line for some reason needs to be split twice + split := strings.Fields(line) + status := strings.TrimSpace(split[1]) + if status == "NotReady" { + foundNotReadyNodes.Insert(split[0]) + } else if status == "Ready" { + foundReadyNodes.Insert(split[0]) + } + } + } + if !foundReadyNodes.Equal(readyNodesSet) { + return fmt.Errorf("expected ready nodes %v, found %v", readyNodesSet, foundReadyNodes) + } + if !foundNotReadyNodes.Equal(notReadyNodesSet) { + return fmt.Errorf("expected not ready nodes %v, found %v", notReadyNodesSet, foundNotReadyNodes) + } + return nil +} diff --git a/tests/docker/test-helpers b/tests/docker/test-helpers index 6a5335689557..61bbfa2031c9 100755 --- a/tests/docker/test-helpers +++ b/tests/docker/test-helpers @@ -525,6 +525,20 @@ run-test() { } export -f run-test +run-go-test() { + local delay=15 + ( + set +x + while [ $(count-running-tests) -ge ${MAX_CONCURRENT_TESTS:-3} ]; do + sleep $delay + done + ) + + go test -timeout=45m -v "$@" & + pids+=($!) +} +export -f run-go-test + # --- cleanup-test-env(){ diff --git a/tests/docker/test-helpers.go b/tests/docker/test-helpers.go index f4228de9dee8..bc5baacf1464 100644 --- a/tests/docker/test-helpers.go +++ b/tests/docker/test-helpers.go @@ -1,8 +1,6 @@ package docker import ( - "bytes" - "context" "fmt" "math/rand" "net" @@ -16,38 +14,35 @@ import ( "golang.org/x/mod/semver" "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" ) type TestConfig struct { TestDir string KubeconfigFile string - Label string - Secret string + Token string K3sImage string - NumServers int - NumAgents int - Servers []ServerConfig - Agents []AgentConfig + DBType string + Servers []Server + Agents []DockerNode + ServerYaml string + AgentYaml string } -type ServerConfig struct { +type DockerNode struct { Name string - Port int IP string - URL string } -type AgentConfig struct { - Name string - IP string +type Server struct { + DockerNode + Port int + URL string } // NewTestConfig initializes the test environment and returns the configuration -// k3s version and tag information is extracted from the version.sh script +// If k3sImage == "rancher/systemd-node", then the systemd-node container and the local k3s binary +// will be used to start the server. This is useful for scenarios where the server needs to be restarted. 
+// k3s version and tag information should be extracted from the version.sh script // and supplied as an argument to the function/test func NewTestConfig(k3sImage string) (*TestConfig, error) { config := &TestConfig{ @@ -60,8 +55,6 @@ func NewTestConfig(k3sImage string) (*TestConfig, error) { return nil, fmt.Errorf("failed to create temp directory: %v", err) } config.TestDir = tempDir - // Setup cleanup on exit - // setupCleanup(config) // Create required directories if err := os.MkdirAll(filepath.Join(config.TestDir, "logs"), 0755); err != nil { @@ -69,7 +62,7 @@ func NewTestConfig(k3sImage string) (*TestConfig, error) { } // Generate random secret - config.Secret = fmt.Sprintf("%012d", rand.Int63n(1000000000000)) + config.Token = fmt.Sprintf("%012d", rand.Int63n(1000000000000)) return config, nil } @@ -98,8 +91,7 @@ func getPort() int { // ProvisionServers starts the required number of k3s servers // and updates the kubeconfig file with the first cp server details func (config *TestConfig) ProvisionServers(numOfServers int) error { - config.NumServers = numOfServers - for i := 0; i < config.NumServers; i++ { + for i := 0; i < numOfServers; i++ { // If a server i already exists, skip. This is useful for scenarios where // the first server is started seperate from the rest of the servers @@ -108,43 +100,106 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error { } testID := filepath.Base(config.TestDir) - name := fmt.Sprintf("k3s-server-%d-%s", i, strings.ToLower(testID)) + name := fmt.Sprintf("server-%d-%s", i, strings.ToLower(testID)) port := getPort() if port == -1 { return fmt.Errorf("failed to find an available port") } - serverImage := getEnvOrDefault("K3S_IMAGE_SERVER", config.K3sImage) + // Write the server yaml to a tmp file and mount it into the container + var yamlMount string + if config.ServerYaml != "" { + if err := os.WriteFile(filepath.Join(config.TestDir, fmt.Sprintf("server-%d.yaml", i)), []byte(config.ServerYaml), 0644); err != nil { + return fmt.Errorf("failed to write server yaml: %v", err) + } + yamlMount = fmt.Sprintf("--mount type=bind,src=%s,dst=/etc/rancher/k3s/config.yaml", filepath.Join(config.TestDir, fmt.Sprintf("server-%d.yaml", i))) + } - var joinOrStart string - if numOfServers > 0 { - if i == 0 { - joinOrStart = "--cluster-init" - } else { - if config.Servers[0].URL == "" { - return fmt.Errorf("first server URL is empty") - } - joinOrStart = fmt.Sprintf("--server %s", config.Servers[0].URL) + var joinServer string + var dbConnect string + var err error + if config.DBType == "" && numOfServers > 1 { + config.DBType = "etcd" + } else if config.DBType == "" { + config.DBType = "sqlite" + } + if i == 0 { + dbConnect, err = config.setupDatabase(true) + if err != nil { + return err } + } else { + dbConnect, err = config.setupDatabase(false) + if err != nil { + return err + } + if config.Servers[0].URL == "" { + return fmt.Errorf("first server URL is empty") + } + joinServer = fmt.Sprintf("--server %s", config.Servers[0].URL) + } + newServer := Server{ + DockerNode: DockerNode{ + Name: name, + }, + Port: port, } - // Assemble all the Docker args - dRun := strings.Join([]string{"docker run -d", - "--name", name, - "--hostname", name, - "--privileged", - "-p", fmt.Sprintf("127.0.0.1:%d:6443", port), - "-p", "6443", - "-e", fmt.Sprintf("K3S_TOKEN=%s", config.Secret), - "-e", "K3S_DEBUG=true", - os.Getenv("SERVER_DOCKER_ARGS"), - os.Getenv(fmt.Sprintf("SERVER_%d_DOCKER_ARGS", i)), - os.Getenv("REGISTRY_CLUSTER_ARGS"), - serverImage, - "server", 
joinOrStart, os.Getenv("SERVER_ARGS"), os.Getenv(fmt.Sprintf("SERVER_%d_ARGS", i))}, " ") - if out, err := RunCommand(dRun); err != nil { - return fmt.Errorf("failed to run server container: %s: %v", out, err) + // If we need restarts, we use the systemd-node container, volume mount the k3s binary + // and start the server using the install script + if config.K3sImage == "rancher/systemd-node" { + dRun := strings.Join([]string{"docker run -d", + "--name", name, + "--hostname", name, + "--privileged", + "-p", fmt.Sprintf("127.0.0.1:%d:6443", port), + "--memory", "2048m", + "-e", fmt.Sprintf("K3S_TOKEN=%s", config.Token), + "-e", "K3S_DEBUG=true", + "-e", "GOCOVERDIR=/tmp/k3s-cov", + "-v", "/sys/fs/bpf:/sys/fs/bpf", + "-v", "/lib/modules:/lib/modules", + "-v", "/var/run/docker.sock:/var/run/docker.sock", + "-v", "/var/lib/docker:/var/lib/docker", + yamlMount, + "--mount", "type=bind,source=$(pwd)/../../../dist/artifacts/k3s,target=/usr/local/bin/k3s", + fmt.Sprintf("%s:v0.0.5", config.K3sImage), + "/usr/lib/systemd/systemd --unit=noop.target --show-status=true"}, " ") + if out, err := RunCommand(dRun); err != nil { + return fmt.Errorf("failed to start systemd container: %s: %v", out, err) + } + time.Sleep(5 * time.Second) + cmd := "mkdir -p /tmp/k3s-cov" + if out, err := newServer.RunCmdOnNode(cmd); err != nil { + return fmt.Errorf("failed to create coverage directory: %s: %v", out, err) + } + // The pipe requires that we use sh -c with "" to run the command + cmd = fmt.Sprintf("/bin/sh -c \"curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC='%s' INSTALL_K3S_SKIP_DOWNLOAD=true sh -\"", + dbConnect+" "+joinServer+" "+os.Getenv(fmt.Sprintf("SERVER_%d_ARGS", i))) + if out, err := newServer.RunCmdOnNode(cmd); err != nil { + return fmt.Errorf("failed to start server: %s: %v", out, err) + } + } else { + // Assemble all the Docker args + dRun := strings.Join([]string{"docker run -d", + "--name", name, + "--hostname", name, + "--privileged", + "-p", fmt.Sprintf("127.0.0.1:%d:6443", port), + "-p", "6443", + "-e", fmt.Sprintf("K3S_TOKEN=%s", config.Token), + "-e", "K3S_DEBUG=true", + "-e", "GOCOVERDIR=/tmp/", + os.Getenv("SERVER_DOCKER_ARGS"), + os.Getenv(fmt.Sprintf("SERVER_%d_DOCKER_ARGS", i)), + os.Getenv("REGISTRY_CLUSTER_ARGS"), + yamlMount, + config.K3sImage, + "server", dbConnect, joinServer, os.Getenv(fmt.Sprintf("SERVER_%d_ARGS", i))}, " ") + if out, err := RunCommand(dRun); err != nil { + return fmt.Errorf("failed to run server container: %s: %v", out, err) + } } // Get the IP address of the container @@ -155,13 +210,9 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error { ip := strings.TrimSpace(ipOutput) url := fmt.Sprintf("https://%s:6443", ip) - - config.Servers = append(config.Servers, ServerConfig{ - Name: name, - Port: port, - IP: ip, - URL: url, - }) + newServer.URL = url + newServer.IP = ip + config.Servers = append(config.Servers, newServer) fmt.Printf("Started %s @ %s\n", name, url) @@ -176,8 +227,41 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error { return copyAndModifyKubeconfig(config) } +// setupDatabase will start the configured database if startDB is true, +// and return the correct flag to join the configured database +func (config *TestConfig) setupDatabase(startDB bool) (string, error) { + + joinFlag := "" + startCmd := "" + switch config.DBType { + case "mysql": + startCmd = "docker run -d --name mysql -e MYSQL_ROOT_PASSWORD=docker -p 3306:3306 mysql:8.4" + joinFlag = "--datastore-endpoint='mysql://root:docker@tcp(172.17.0.1:3306)/k3s'" 
+ case "postgres": + startCmd = "docker run -d --name postgres -e POSTGRES_PASSWORD=docker -p 5432:5432 postgres:16-alpine" + joinFlag = "--datastore-endpoint='postgres://postgres:docker@tcp(172.17.0.1:5432)/k3s'" + case "etcd": + if startDB { + joinFlag = "--cluster-init" + } + case "sqlite": + break + default: + return "", fmt.Errorf("unsupported database type: %s", config.DBType) + } + + if startDB && startCmd != "" { + if out, err := RunCommand(startCmd); err != nil { + return "", fmt.Errorf("failed to start %s container: %s: %v", config.DBType, out, err) + } + // Wait for DB to start + time.Sleep(10 * time.Second) + } + return joinFlag, nil + +} + func (config *TestConfig) ProvisionAgents(numOfAgents int) error { - config.NumAgents = numOfAgents if err := checkVersionSkew(config); err != nil { return err } @@ -185,28 +269,60 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error { k3sURL := getEnvOrDefault("K3S_URL", config.Servers[0].URL) var g errgroup.Group - for i := 0; i < config.NumAgents; i++ { + for i := 0; i < numOfAgents; i++ { i := i // capture loop variable g.Go(func() error { - name := fmt.Sprintf("k3s-agent-%d-%s", i, strings.ToLower(testID)) + name := fmt.Sprintf("agent-%d-%s", i, strings.ToLower(testID)) agentInstanceArgs := fmt.Sprintf("AGENT_%d_ARGS", i) + newAgent := DockerNode{ + Name: name, + } - // Assemble all the Docker args - dRun := strings.Join([]string{"docker run -d", - "--name", name, - "--hostname", name, - "--privileged", - "-e", fmt.Sprintf("K3S_TOKEN=%s", config.Secret), - "-e", fmt.Sprintf("K3S_URL=%s", k3sURL), - os.Getenv("AGENT_DOCKER_ARGS"), - os.Getenv(fmt.Sprintf("AGENT_%d_DOCKER_ARGS", i)), - os.Getenv("REGISTRY_CLUSTER_ARGS"), - getEnvOrDefault("K3S_IMAGE_AGENT", config.K3sImage), - "agent", os.Getenv("ARGS"), os.Getenv("AGENT_ARGS"), os.Getenv(agentInstanceArgs)}, " ") + if config.K3sImage == "rancher/systemd-node" { + dRun := strings.Join([]string{"docker run -d", + "--name", name, + "--hostname", name, + "--privileged", + "--memory", "2048m", + "-e", fmt.Sprintf("K3S_TOKEN=%s", config.Token), + "-e", fmt.Sprintf("K3S_URL=%s", k3sURL), + "-v", "/sys/fs/bpf:/sys/fs/bpf", + "-v", "/lib/modules:/lib/modules", + "-v", "/var/run/docker.sock:/var/run/docker.sock", + "-v", "/var/lib/docker:/var/lib/docker", + "--mount", "type=bind,source=$(pwd)/../../../dist/artifacts/k3s,target=/usr/local/bin/k3s", + fmt.Sprintf("%s:v0.0.5", config.K3sImage), + "/usr/lib/systemd/systemd --unit=noop.target --show-status=true"}, " ") + if out, err := RunCommand(dRun); err != nil { + return fmt.Errorf("failed to start systemd container: %s: %v", out, err) + } + time.Sleep(5 * time.Second) + // The pipe requires that we use sh -c with "" to run the command + sCmd := fmt.Sprintf("/bin/sh -c \"curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC='agent %s' INSTALL_K3S_SKIP_DOWNLOAD=true sh -\"", + os.Getenv(agentInstanceArgs)) - if out, err := RunCommand(dRun); err != nil { - return fmt.Errorf("failed to run agent container: %s: %v", out, err) + if out, err := newAgent.RunCmdOnNode(sCmd); err != nil { + return fmt.Errorf("failed to start server: %s: %v", out, err) + } + } else { + // Assemble all the Docker args + dRun := strings.Join([]string{"docker run -d", + "--name", name, + "--hostname", name, + "--privileged", + "-e", fmt.Sprintf("K3S_TOKEN=%s", config.Token), + "-e", fmt.Sprintf("K3S_URL=%s", k3sURL), + "-e", "GOCOVERDIR=/tmp/", + os.Getenv("AGENT_DOCKER_ARGS"), + os.Getenv(fmt.Sprintf("AGENT_%d_DOCKER_ARGS", i)), + os.Getenv("REGISTRY_CLUSTER_ARGS"), + 
config.K3sImage, + "agent", os.Getenv("ARGS"), os.Getenv(agentInstanceArgs)}, " ") + + if out, err := RunCommand(dRun); err != nil { + return fmt.Errorf("failed to run agent container: %s: %v", out, err) + } } // Get the IP address of the container @@ -215,11 +331,9 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error { return err } ip := strings.TrimSpace(ipOutput) + newAgent.IP = ip + config.Agents = append(config.Agents, newAgent) - config.Agents = append(config.Agents, AgentConfig{ - Name: name, - IP: ip, - }) fmt.Printf("Started %s\n", name) return nil }) @@ -244,6 +358,7 @@ func (config *TestConfig) RemoveNode(nodeName string) error { return nil } +// Returns a list of all node names func (config *TestConfig) GetNodeNames() []string { var nodeNames []string for _, server := range config.Servers { @@ -272,6 +387,18 @@ func (config *TestConfig) Cleanup() error { } } + // Stop DB if it was started + if config.DBType == "mysql" || config.DBType == "postgres" { + cmd := fmt.Sprintf("docker stop %s", config.DBType) + if _, err := RunCommand(cmd); err != nil { + errs = append(errs, fmt.Errorf("failed to stop %s: %v", config.DBType, err)) + } + cmd = fmt.Sprintf("docker rm %s", config.DBType) + if _, err := RunCommand(cmd); err != nil { + errs = append(errs, fmt.Errorf("failed to remove %s: %v", config.DBType, err)) + } + } + // Error out if we hit any issues if len(errs) > 0 { return fmt.Errorf("cleanup failed: %v", errs) @@ -315,28 +442,28 @@ func copyAndModifyKubeconfig(config *TestConfig) error { return nil } -// RunCmdOnDocker runs a command on a docker container -func RunCmdOnDocker(container, cmd string) (string, error) { - dCmd := fmt.Sprintf("docker exec %s %s", container, cmd) - return RunCommand(dCmd) +// RunCmdOnNode runs a command on a docker container +func (node DockerNode) RunCmdOnNode(cmd string) (string, error) { + dCmd := fmt.Sprintf("docker exec %s %s", node.Name, cmd) + out, err := RunCommand(dCmd) + if err != nil { + return out, fmt.Errorf("%v: on node %s: %s", err, node.Name, out) + } + return out, nil } // RunCommand Runs command on the host. -// Returns stdout and embeds stderr inside the error message. 
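Note on the RunCommand change just below: switching to CombinedOutput interleaves stdout and stderr and drops the command output from the returned error string. If callers rely on the error message alone (for example in Gomega failure output), a variant that keeps the output in the error might look like this (sketch, same bash -c invocation):

    // Sketch: same CombinedOutput approach, but the command output is also
    // embedded in the returned error so assertion failures stay self-describing.
    func runCommandWithOutputInError(cmd string) (string, error) {
        c := exec.Command("bash", "-c", cmd)
        out, err := c.CombinedOutput()
        if err != nil {
            return string(out), fmt.Errorf("failed to run command: %s: %s: %v", cmd, string(out), err)
        }
        return string(out), nil
    }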
func RunCommand(cmd string) (string, error) { - var stdout, stderr bytes.Buffer c := exec.Command("bash", "-c", cmd) - c.Stdout = &stdout - c.Stderr = &stderr - err := c.Run() + out, err := c.CombinedOutput() if err != nil { - return stdout.String(), fmt.Errorf("failed to run command: %s: %s: %v", cmd, stderr.String(), err) + return string(out), fmt.Errorf("failed to run command: %s, %v", cmd, err) } - return stdout.String(), nil + return string(out), err } func checkVersionSkew(config *TestConfig) error { - if config.NumAgents > 0 { + if len(config.Agents) > 0 { serverImage := getEnvOrDefault("K3S_IMAGE_SERVER", config.K3sImage) agentImage := getEnvOrDefault("K3S_IMAGE_AGENT", config.K3sImage) if semver.Compare(semver.MajorMinor(agentImage), semver.MajorMinor(serverImage)) > 0 { @@ -355,8 +482,8 @@ func getEnvOrDefault(key, defaultValue string) string { } // VerifyValidVersion checks for invalid version strings -func VerifyValidVersion(container string, binary string) error { - output, err := RunCmdOnDocker(container, binary+" version") +func VerifyValidVersion(node Server, binary string) error { + output, err := node.RunCmdOnNode(binary + " version") if err != nil { return err } @@ -399,93 +526,21 @@ func GetVersionFromChannel(upgradeChannel string) (string, error) { return version, nil } -// TODO the below functions are duplicated in the integration test utils. Consider combining into commmon package -// DeploymentsReady checks if the provided list of deployments are ready, otherwise returns an error -func DeploymentsReady(deployments []string, kubeconfigFile string) error { - - deploymentSet := make(map[string]bool) - for _, d := range deployments { - deploymentSet[d] = false - } - - client, err := k8sClient(kubeconfigFile) +// TODO the below functions are replicated from e2e test utils. 
Consider combining into commmon package +func (config TestConfig) DeployWorkload(workload string) (string, error) { + resourceDir := "../resources" + files, err := os.ReadDir(resourceDir) if err != nil { - return err - } - deploymentList, err := client.AppsV1().Deployments("").List(context.Background(), metav1.ListOptions{}) - if err != nil { - return err - } - for _, deployment := range deploymentList.Items { - if _, ok := deploymentSet[deployment.Name]; ok && deployment.Status.ReadyReplicas == deployment.Status.Replicas { - deploymentSet[deployment.Name] = true + err = fmt.Errorf("%s : Unable to read resource manifest file for %s", err, workload) + return "", err + } + fmt.Println("\nDeploying", workload) + for _, f := range files { + filename := filepath.Join(resourceDir, f.Name()) + if strings.TrimSpace(f.Name()) == workload { + cmd := "kubectl apply -f " + filename + " --kubeconfig=" + config.KubeconfigFile + return RunCommand(cmd) } } - for d, found := range deploymentSet { - if !found { - return fmt.Errorf("failed to deploy %s", d) - } - } - - return nil -} - -func ParseNodes(kubeconfigFile string) ([]corev1.Node, error) { - clientSet, err := k8sClient(kubeconfigFile) - if err != nil { - return nil, err - } - nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - return nil, err - } - - return nodes.Items, nil -} - -// PodReady checks if a pod is ready by querying its status -func PodReady(podName, namespace, kubeconfigFile string) (bool, error) { - clientSet, err := k8sClient(kubeconfigFile) - if err != nil { - return false, err - } - pod, err := clientSet.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{}) - if err != nil { - return false, fmt.Errorf("failed to get pod: %v", err) - } - // Check if the pod is running - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.Name == podName && containerStatus.Ready { - return true, nil - } - } - return false, nil -} - -// Checks if all nodes are ready, otherwise returns an error -func NodesReady(kubeconfigFile string) error { - nodes, err := ParseNodes(kubeconfigFile) - if err != nil { - return err - } - for _, node := range nodes { - for _, condition := range node.Status.Conditions { - if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue { - return fmt.Errorf("node %s is not ready", node.Name) - } - } - } - return nil -} - -func k8sClient(kubeconfigFile string) (*kubernetes.Clientset, error) { - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigFile) - if err != nil { - return nil, err - } - clientSet, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, err - } - return clientSet, nil + return "", nil } diff --git a/tests/docker/upgrade/upgrade_test.go b/tests/docker/upgrade/upgrade_test.go index 9bec74438bff..7f2d20e12ca4 100644 --- a/tests/docker/upgrade/upgrade_test.go +++ b/tests/docker/upgrade/upgrade_test.go @@ -1,4 +1,4 @@ -package main +package upgrade import ( "flag" @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -17,7 +18,7 @@ import ( // Using these two flags, we upgrade from the latest release of to // the current commit build of K3s defined by var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s") -var branch = flag.String("branch", "master", "The release branch to test") +var channel = flag.String("channel", "latest", "The release channel to test") var config *tester.TestConfig var numServers = 1 @@ -34,22 +35,15 @@ var _ = Describe("Upgrade Tests", Ordered, func() { Context("Setup Cluster with Lastest Release", func() { var latestVersion string It("should determine latest branch version", func() { - var upgradeChannel string - var err error - if *branch == "master" { - upgradeChannel = "latest" - } else { - upgradeChannel = strings.Replace(*branch, "release-", "v", 1) - url := fmt.Sprintf("https://update.k3s.io/v1-release/channels/%s", upgradeChannel) - resp, err := http.Head(url) - // Cover the case where the branch does not exist yet, - // such as a new unreleased minor version - if err != nil || resp.StatusCode != http.StatusOK { - upgradeChannel = "latest" - } + url := fmt.Sprintf("https://update.k3s.io/v1-release/channels/%s", *channel) + resp, err := http.Head(url) + // Cover the case where the branch does not exist yet, + // such as a new unreleased minor version + if err != nil || resp.StatusCode != http.StatusOK { + *channel = "latest" } - latestVersion, err = tester.GetVersionFromChannel(upgradeChannel) + latestVersion, err = tester.GetVersionFromChannel(*channel) Expect(err).NotTo(HaveOccurred()) Expect(latestVersion).To(ContainSubstring("v1.")) fmt.Println("Using latest version: ", latestVersion) @@ -60,15 +54,15 @@ var _ = Describe("Upgrade Tests", Ordered, func() { testID := filepath.Base(config.TestDir) Expect(err).NotTo(HaveOccurred()) for i := 0; i < numServers; i++ { - m1 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID) - m2 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-log,dst=/var/log", i, testID) - m3 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-etc,dst=/etc/rancher", i, testID) + m1 := fmt.Sprintf("--mount type=volume,src=server-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID) + m2 := fmt.Sprintf("--mount type=volume,src=server-%d-%s-log,dst=/var/log", i, testID) + m3 := fmt.Sprintf("--mount type=volume,src=server-%d-%s-etc,dst=/etc/rancher", i, testID) Expect(os.Setenv(fmt.Sprintf("SERVER_%d_DOCKER_ARGS", i), fmt.Sprintf("%s %s %s", m1, m2, m3))).To(Succeed()) } for i := 0; i < numAgents; i++ { - m1 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID) - m2 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-log,dst=/var/log", i, testID) - m3 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-etc,dst=/etc/rancher", i, testID) + m1 := fmt.Sprintf("--mount type=volume,src=agent-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID) + m2 := fmt.Sprintf("--mount type=volume,src=agent-%d-%s-log,dst=/var/log", i, testID) + m3 := fmt.Sprintf("--mount type=volume,src=agent-%d-%s-etc,dst=/etc/rancher", i, testID) Expect(os.Setenv(fmt.Sprintf("AGENT_%d_DOCKER_ARGS", i), fmt.Sprintf("%s %s %s", m1, m2, m3))).To(Succeed()) } }) @@ -76,26 +70,22 @@ var _ = Describe("Upgrade Tests", Ordered, func() { Expect(config.ProvisionServers(numServers)).To(Succeed()) Expect(config.ProvisionAgents(numAgents)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", 
"local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) }) It("should confirm latest version", func() { for _, server := range config.Servers { - out, err := tester.RunCmdOnDocker(server.Name, "k3s --version") + out, err := server.RunCmdOnNode("k3s --version") Expect(err).NotTo(HaveOccurred()) Expect(out).To(ContainSubstring(strings.Replace(latestVersion, "-", "+", 1))) } }) It("should deploy a test pod", func() { - const volumeTestManifest = "../resources/volume-test.yaml" - - // Apply the manifest - cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile) - _, err := tester.RunCommand(cmd) + _, err := config.DeployWorkload("volume-test.yaml") Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest") Eventually(func() (bool, error) { - return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile) + return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile) }, "20s", "5s").Should(BeTrue()) }) It("should upgrade to current commit build", func() { @@ -122,26 +112,28 @@ var _ = Describe("Upgrade Tests", Ordered, func() { Expect(config.ProvisionAgents(numAgents)).To(Succeed()) Eventually(func() error { - return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile) }, "60s", "5s").Should(Succeed()) }) It("should confirm commit version", func() { for _, server := range config.Servers { - Expect(tester.VerifyValidVersion(server.Name, "kubectl")).To(Succeed()) - Expect(tester.VerifyValidVersion(server.Name, "ctr")).To(Succeed()) - Expect(tester.VerifyValidVersion(server.Name, "crictl")).To(Succeed()) + Expect(tester.VerifyValidVersion(server, "kubectl")).To(Succeed()) + Expect(tester.VerifyValidVersion(server, "ctr")).To(Succeed()) + Expect(tester.VerifyValidVersion(server, "crictl")).To(Succeed()) - out, err := tester.RunCmdOnDocker(server.Name, "k3s --version") + out, err := server.RunCmdOnNode("k3s --version") Expect(err).NotTo(HaveOccurred()) cVersion := strings.Split(*k3sImage, ":")[1] cVersion = strings.Replace(cVersion, "-amd64", "", 1) + cVersion = strings.Replace(cVersion, "-arm64", "", 1) + cVersion = strings.Replace(cVersion, "-arm", "", 1) cVersion = strings.Replace(cVersion, "-", "+", 1) Expect(out).To(ContainSubstring(cVersion)) } }) It("should confirm test pod is still Running", func() { Eventually(func() (bool, error) { - return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile) + return tests.PodReady("volume-test", "kube-system", config.KubeconfigFile) }, "20s", "5s").Should(BeTrue()) }) diff --git a/tests/e2e/autoimport/autoimport_test.go b/tests/e2e/autoimport/autoimport_test.go index 83eff17d1818..2fd103243667 100644 --- a/tests/e2e/autoimport/autoimport_test.go +++ b/tests/e2e/autoimport/autoimport_test.go @@ -2,11 +2,10 @@ package autoimport import ( "flag" - "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -32,11 +31,7 @@ func Test_E2EAutoImport(t *testing.T) { RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -45,133 +40,121 @@ var _ = Describe("Verify Create", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Create a folder in agent/images", func() { cmd := `mkdir /var/lib/rancher/k3s/agent/images` - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) }) It("Create file for auto import and search in the image store", func() { cmd := `echo docker.io/library/redis:latest | sudo tee /var/lib/rancher/k3s/agent/images/testautoimport.txt` - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/redis` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) It("Change name for the file and see if the label is still pinned", func() { cmd := `mv /var/lib/rancher/k3s/agent/images/testautoimport.txt 
/var/lib/rancher/k3s/agent/images/testautoimportrename.txt` - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/redis` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) It("Create, remove and create again a file", func() { cmd := `echo docker.io/library/busybox:latest | sudo tee /var/lib/rancher/k3s/agent/images/bb.txt` - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/busybox` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) cmd = `rm /var/lib/rancher/k3s/agent/images/bb.txt` - _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/busybox` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) cmd = `echo docker.io/library/busybox:latest | sudo tee /var/lib/rancher/k3s/agent/images/bb.txt` - _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/busybox` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) It("Move the folder, add a image and then see if the image is going to be pinned", func() { cmd := `mv /var/lib/rancher/k3s/agent/images /var/lib/rancher/k3s/agent/test` - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) cmd = `echo 'docker.io/library/mysql:latest' | sudo tee /var/lib/rancher/k3s/agent/test/mysql.txt` - _, err = 
e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) cmd = `mv /var/lib/rancher/k3s/agent/test /var/lib/rancher/k3s/agent/images` - _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/mysql` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) It("Restarts normally", func() { - errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...)) + errRestart := e2e.RestartCluster(append(tc.Servers, tc.Agents...)) Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) @@ -182,29 +165,29 @@ var _ = Describe("Verify Create", Ordered, func() { It("Verify bb.txt image and see if are pinned", func() { Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/busybox` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) It("Removes bb.txt file", func() { cmd := `rm /var/lib/rancher/k3s/agent/images/bb.txt` - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) Eventually(func(g Gomega) { cmd := `k3s ctr images list | grep library/busybox` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).Should(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) It("Restarts normally", func() { - errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...)) + errRestart := e2e.RestartCluster(append(tc.Servers, tc.Agents...)) Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) @@ -215,8 +198,8 @@ var _ = Describe("Verify Create", Ordered, func() { It("Verify if bb.txt image is unpinned", func() { Eventually(func(g Gomega) { cmd := `k3s ctr 
images list | grep library/busybox` - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).ShouldNot(ContainSubstring("io.cattle.k3s.pinned=pinned")) - g.Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).ShouldNot(ContainSubstring("io.cri-containerd.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).ShouldNot(ContainSubstring("io.cattle.k3s.pinned=pinned")) + g.Expect(tc.Servers[0].RunCmdOnNode(cmd)).ShouldNot(ContainSubstring("io.cri-containerd.pinned=pinned")) }, "620s", "5s").Should(Succeed()) }) @@ -231,10 +214,10 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if !failed { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/btrfs/btrfs_test.go b/tests/e2e/btrfs/btrfs_test.go index a99211a1634c..4b7b07855780 100644 --- a/tests/e2e/btrfs/btrfs_test.go +++ b/tests/e2e/btrfs/btrfs_test.go @@ -2,11 +2,10 @@ package rotateca import ( "flag" - "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -25,10 +24,7 @@ func Test_E2EBtrfsSnapshot(t *testing.T) { RunSpecs(t, "Btrfs Snapshot Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -38,48 +34,39 @@ var _ = Describe("Verify that btrfs based servers work", Ordered, func() { var err error // OS and server are hardcoded because only openSUSE Leap 15.5 natively supports Btrfs if *local { - serverNodeNames, _, err = e2e.CreateLocalCluster("opensuse/Leap-15.6.x86_64", 1, 0) + tc, err = e2e.CreateLocalCluster("opensuse/Leap-15.6.x86_64", 1, 0) } else { - serverNodeNames, _, err = e2e.CreateCluster("opensuse/Leap-15.6.x86_64", 1, 0) + tc, err = e2e.CreateCluster("opensuse/Leap-15.6.x86_64", 1, 0) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("Server Nodes:", serverNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By(tc.Status()) }) It("Checks node and pod status", func() { - fmt.Printf("\nFetching node status\n") + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Checks that btrfs snapshots exist", func() { cmd := "btrfs subvolume list 
/var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.btrfs" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(MatchRegexp("agent/containerd/io.containerd.snapshotter.v1.btrfs/active/\\d+")) - Expect(res).To(MatchRegexp("agent/containerd/io.containerd.snapshotter.v1.btrfs/snapshots/\\d+")) + Eventually(func(g Gomega) { + res, err := tc.Servers[0].RunCmdOnNode(cmd) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res).To(MatchRegexp("agent/containerd/io.containerd.snapshotter.v1.btrfs/active/\\d+")) + g.Expect(res).To(MatchRegexp("agent/containerd/io.containerd.snapshotter.v1.btrfs/snapshots/\\d+")) + }, "30s", "5s").Should(Succeed()) }) }) }) @@ -91,10 +78,10 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - Expect(e2e.SaveJournalLogs(serverNodeNames)).To(Succeed()) + Expect(e2e.SaveJournalLogs(tc.Servers)).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/dualstack/dualstack_test.go b/tests/e2e/dualstack/dualstack_test.go index 9262af922cea..caa292418b4f 100644 --- a/tests/e2e/dualstack/dualstack_test.go +++ b/tests/e2e/dualstack/dualstack_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -27,165 +28,151 @@ func Test_E2EDualStack(t *testing.T) { RunSpecs(t, "DualStack Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) var _ = Describe("Verify DualStack Configuration", Ordered, func() { + Context("Cluster Deploys with both IPv6 and IPv4 networks", func() { + It("Starts up with no issues", func() { + var err error + if *local { + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + } else { + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + } + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + tc.Hardened = *hardened + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "620s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(tc.KubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) - It("Starts up with no issues", func() { - var err error - if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) - } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) - } - Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - }) + It("Checks pod status", func() { + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "620s", "5s").Should(Succeed()) + e2e.DumpPods(tc.KubeConfigFile) + }) - It("Checks Node Status", func() { - Eventually(func(g Gomega) { - nodes, err := 
e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) + It("Verifies that each node has IPv4 and IPv6", func() { + nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeIPs { + Expect(node.IPv4).Should(ContainSubstring("10.10.10")) + Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff")) } - }, "620s", "5s").Should(Succeed()) - _, err := e2e.ParseNodes(kubeConfigFile, true) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Pod Status", func() { - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } + }) + It("Verifies that each pod has IPv4 and IPv6", func() { + podIPs, err := e2e.GetPodIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range podIPs { + Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name) + Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name) } - }, "620s", "5s").Should(Succeed()) - _, err := e2e.ParsePods(kubeConfigFile, true) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Verifies that each node has IPv4 and IPv6", func() { - nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodeIPs { - Expect(node.IPv4).Should(ContainSubstring("10.10.10")) - Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff")) - } - }) - It("Verifies that each pod has IPv4 and IPv6", func() { - podIPs, err := e2e.GetPodIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, pod := range podIPs { - Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name) - Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name) - } - }) + }) - It("Verifies ClusterIP Service", func() { - _, err := e2e.DeployWorkload("dualstack_clusterip.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile - return e2e.RunCommand(cmd) - }, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod")) + It("Verifies ClusterIP Service", func() { + _, err := tc.DeployWorkload("dualstack_clusterip.yaml") + Expect(err).NotTo(HaveOccurred()) + Eventually(func() (string, error) { + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile + return e2e.RunCommand(cmd) + }, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod")) - // Checks both IPv4 and IPv6 - clusterips, err := e2e.FetchClusterIP(kubeConfigFile, "ds-clusterip-svc", true) - Expect(err).NotTo(HaveOccurred()) - for _, ip := range strings.Split(clusterips, ",") { - if strings.Contains(ip, "::") { - ip = "[" + ip + "]" - } - pods, err := e2e.ParsePods(kubeConfigFile, false) + // Checks both IPv4 and IPv6 + clusterips, err := e2e.FetchClusterIP(tc.KubeConfigFile, "ds-clusterip-svc", true) Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if !strings.HasPrefix(pod.Name, 
"ds-clusterip-pod") { - continue + for _, ip := range strings.Split(clusterips, ",") { + if strings.Contains(ip, "::") { + ip = "[" + ip + "]" } - cmd := fmt.Sprintf("curl -L --insecure http://%s", ip) + pods, err := tests.ParsePods(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if !strings.HasPrefix(pod.Name, "ds-clusterip-pod") { + continue + } + cmd := fmt.Sprintf("curl -L --insecure http://%s", ip) + Eventually(func() (string, error) { + return tc.Servers[0].RunCmdOnNode(cmd) + }, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd) + } + } + }) + It("Verifies Ingress", func() { + _, err := tc.DeployWorkload("dualstack_ingress.yaml") + Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") + cmd := "kubectl get ingress ds-ingress -o jsonpath=\"{.spec.rules[*].host}\"" + hostName, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) + nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) + for _, node := range nodeIPs { + cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.IPv4) + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) + cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.IPv6) Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - }, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd) + return e2e.RunCommand(cmd) + }, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) } - } - }) - It("Verifies Ingress", func() { - _, err := e2e.DeployWorkload("dualstack_ingress.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - cmd := "kubectl get ingress ds-ingress --kubeconfig=" + kubeConfigFile + " -o jsonpath=\"{.spec.rules[*].host}\"" - hostName, err := e2e.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) - nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) - for _, node := range nodeIPs { - cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.IPv4) - Eventually(func() (string, error) { - return e2e.RunCommand(cmd) - }, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) - cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.IPv6) - Eventually(func() (string, error) { - return e2e.RunCommand(cmd) - }, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) - } - }) + }) - It("Verifies NodePort Service", func() { - _, err := e2e.DeployWorkload("dualstack_nodeport.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := e2e.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) - nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodeIPs { - cmd = "curl -L --insecure http://" + node.IPv4 + ":" + nodeport + "/name.html" + It("Verifies NodePort Service", func() { + _, err := tc.DeployWorkload("dualstack_nodeport.yaml") + Expect(err).NotTo(HaveOccurred()) + cmd := "kubectl get service ds-nodeport-svc --output 
jsonpath=\"{.spec.ports[0].nodePort}\"" + nodeport, err := e2e.RunCommand(cmd) + Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) + nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeIPs { + cmd = "curl -L --insecure http://" + node.IPv4 + ":" + nodeport + "/name.html" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) + cmd = "curl -L --insecure http://[" + node.IPv6 + "]:" + nodeport + "/name.html" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) + } + }) + It("Verifies podSelector Network Policy", func() { + _, err := tc.DeployWorkload("pod_client.yaml") + Expect(err).NotTo(HaveOccurred()) + cmd := "kubectl exec svc/client-curl -- curl -m7 ds-clusterip-svc/name.html" Eventually(func() (string, error) { return e2e.RunCommand(cmd) - }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) - cmd = "curl -L --insecure http://[" + node.IPv6 + "]:" + nodeport + "/name.html" + }, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) + _, err = tc.DeployWorkload("netpol-fail.yaml") + Expect(err).NotTo(HaveOccurred()) + cmd = "kubectl exec svc/client-curl -- curl -m7 ds-clusterip-svc/name.html" + Eventually(func() error { + _, err = e2e.RunCommand(cmd) + Expect(err).To(HaveOccurred()) + return err + }, "20s", "3s") + _, err = tc.DeployWorkload("netpol-work.yaml") + Expect(err).NotTo(HaveOccurred()) + cmd = "kubectl exec svc/client-curl -- curl -m7 ds-clusterip-svc/name.html" Eventually(func() (string, error) { return e2e.RunCommand(cmd) - }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) - } - }) - It("Verifies podSelector Network Policy", func() { - _, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 ds-clusterip-svc/name.html" - Eventually(func() (string, error) { - return e2e.RunCommand(cmd) - }, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) - _, err = e2e.DeployWorkload("netpol-fail.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - cmd = "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 ds-clusterip-svc/name.html" - Eventually(func() error { - _, err = e2e.RunCommand(cmd) - Expect(err).To(HaveOccurred()) - return err - }, "20s", "3s") - _, err = e2e.DeployWorkload("netpol-work.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - cmd = "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 ds-clusterip-svc/name.html" - Eventually(func() (string, error) { - return e2e.RunCommand(cmd) - }, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) + }, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) + }) }) }) @@ -196,12 +183,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, 
tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/embeddedmirror/embeddedmirror_test.go b/tests/e2e/embeddedmirror/embeddedmirror_test.go index d753528bf616..c4256324cd65 100644 --- a/tests/e2e/embeddedmirror/embeddedmirror_test.go +++ b/tests/e2e/embeddedmirror/embeddedmirror_test.go @@ -4,9 +4,9 @@ import ( "flag" "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -32,11 +32,7 @@ func Test_E2EPrivateRegistry(t *testing.T) { RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -45,42 +41,31 @@ var _ = Describe("Verify Create", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + By("Fetching pod status") + Eventually(func() error { + e2e.DumpPods(tc.KubeConfigFile) + return tests.AllPodsUp(tc.KubeConfigFile) + }, "620s", "10s").Should(Succeed()) }) It("Should create and validate deployment with embedded registry mirror using image tag", func() { res, err := e2e.RunCommand("kubectl create deployment my-webpage-1 --image=docker.io/library/nginx:1.25.3") @@ -128,7 +113,7 @@ var _ = Describe("Verify Create", Ordered, func() { }) It("Should expose embedded registry metrics", func() { - grepCmd := fmt.Sprintf("kubectl get --raw /api/v1/nodes/%s/proxy/metrics | grep -F 'spegel_advertised_images{registry=\"docker.io\"}'", serverNodeNames[0]) + grepCmd := fmt.Sprintf("kubectl get --raw /api/v1/nodes/%s/proxy/metrics | grep -F 
'spegel_advertised_images{registry=\"docker.io\"}'", tc.Servers[0]) res, err := e2e.RunCommand(grepCmd) fmt.Println(res) Expect(err).NotTo(HaveOccurred()) @@ -147,12 +132,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed()) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/externalip/externalip_test.go b/tests/e2e/externalip/externalip_test.go index f8acbc9bce41..0c560e2e30c8 100644 --- a/tests/e2e/externalip/externalip_test.go +++ b/tests/e2e/externalip/externalip_test.go @@ -7,11 +7,11 @@ package externalip import ( "flag" - "fmt" "os" "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -21,7 +21,6 @@ import ( var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system") var serverCount = flag.Int("serverCount", 1, "number of server nodes") var agentCount = flag.Int("agentCount", 1, "number of agent nodes") -var hardened = flag.Bool("hardened", false, "true or false") var ci = flag.Bool("ci", false, "running on CI") var local = flag.Bool("local", false, "deploy a locally built K3s binary") @@ -55,107 +54,93 @@ func Test_E2EExternalIP(t *testing.T) { } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) var _ = Describe("Verify External-IP config", Ordered, func() { - - It("Starts up with no issues", func() { - var err error - if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) - } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) - } - Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Node Status", func() { - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) + Context("Cluster comes up with External-IP configuration", func() { + It("Starts up with no issues", func() { + var err error + if *local { + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + } else { + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } - }, "620s", "5s").Should(Succeed()) - _, err := e2e.ParseNodes(kubeConfigFile, true) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Pod Status", func() { - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + 
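Review note on the embeddedmirror metrics check above: serverNodeNames[0] was a plain string, but tc.Servers[0] is now a node value passed to %s, which only yields the bare node name if the type implements fmt.Stringer; otherwise the /api/v1/nodes/.../proxy/metrics path ends up containing a struct dump. Worth a quick check; if needed, pass the node's name explicitly, for example (accessor name hypothetical):

    // Use the explicit name rather than relying on the node type's formatting.
    grepCmd := fmt.Sprintf("kubectl get --raw /api/v1/nodes/%s/proxy/metrics | grep -F 'spegel_advertised_images{registry=\"docker.io\"}'", tc.Servers[0].Name)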
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) } - } - }, "620s", "5s").Should(Succeed()) - _, err := e2e.ParsePods(kubeConfigFile, true) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Verifies that each node has vagrant IP", func() { - nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, node := range nodeIPs { - Expect(node.IPv4).Should(ContainSubstring("10.10.")) - } - }) - It("Verifies that each pod has vagrant IP or clusterCIDR IP", func() { - podIPs, err := e2e.GetPodIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, pod := range podIPs { - Expect(pod.IPv4).Should(Or(ContainSubstring("10.10."), ContainSubstring("10.42.")), pod.Name) - } + }, "620s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(tc.KubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks pod status", func() { + By("Fetching pod status") + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "620s", "10s").Should(Succeed()) + }) }) - It("Verifies that flannel added the correct annotation for the external-ip", func() { - nodeIPs, err := getExternalIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, annotation := range nodeIPs { - Expect(annotation).Should(ContainSubstring("10.100.100.")) - } - }) - It("Verifies internode connectivity over the tunnel", func() { - _, err := e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - - // Wait for the pod_client to have an IP - Eventually(func() string { - ips, _ := getClientIPs(kubeConfigFile) - return ips[0].IPv4 - }, "40s", "5s").Should(ContainSubstring("10.42"), "failed getClientIPs") - - clientIPs, err := getClientIPs(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - for _, ip := range clientIPs { - cmd := "kubectl exec svc/client-curl --kubeconfig=" + kubeConfigFile + " -- curl -m7 " + ip.IPv4 + "/name.html" + Context("Deploy workloads to check cluster connectivity of the nodes", func() { + It("Verifies that each node has vagrant IP", func() { + nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeIPs { + Expect(node.IPv4).Should(ContainSubstring("10.10.")) + } + }) + It("Verifies that each pod has vagrant IP or clusterCIDR IP", func() { + podIPs, err := e2e.GetPodIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range podIPs { + Expect(pod.IPv4).Should(Or(ContainSubstring("10.10."), ContainSubstring("10.42.")), pod.Name) + } + }) + It("Verifies that flannel added the correct annotation for the external-ip", func() { + nodeIPs, err := getExternalIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, annotation := range nodeIPs { + Expect(annotation).Should(ContainSubstring("10.100.100.")) + } + }) + It("Verifies internode connectivity over the tunnel", func() { + _, err := tc.DeployWorkload("pod_client.yaml") + Expect(err).NotTo(HaveOccurred()) + + // Wait for the pod_client to have an IP + Eventually(func() string { + ips, _ := getClientIPs(tc.KubeConfigFile) + return ips[0].IPv4 + }, "40s", "5s").Should(ContainSubstring("10.42"), "failed getClientIPs") + + clientIPs, err := 
getClientIPs(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, ip := range clientIPs { + cmd := "kubectl exec svc/client-curl -- curl -m7 " + ip.IPv4 + "/name.html" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "20s", "3s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd) + } + }) + It("Verifies loadBalancer service's IP is the node-external-ip", func() { + _, err := tc.DeployWorkload("loadbalancer.yaml") + Expect(err).NotTo(HaveOccurred()) + cmd := "kubectl get svc -l k8s-app=nginx-app-loadbalancer -o=jsonpath='{range .items[*]}{.metadata.name}{.status.loadBalancer.ingress[*].ip}{end}'" Eventually(func() (string, error) { return e2e.RunCommand(cmd) - }, "20s", "3s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd) - } - }) - It("Verifies loadBalancer service's IP is the node-external-ip", func() { - _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred()) - cmd := "kubectl --kubeconfig=" + kubeConfigFile + " get svc -l k8s-app=nginx-app-loadbalancer -o=jsonpath='{range .items[*]}{.metadata.name}{.status.loadBalancer.ingress[*].ip}{end}'" - Eventually(func() (string, error) { - return e2e.RunCommand(cmd) - }, "20s", "3s").Should(ContainSubstring("10.100.100"), "failed cmd: "+cmd) + }, "20s", "3s").Should(ContainSubstring("10.100.100"), "failed cmd: "+cmd) + }) }) }) @@ -166,12 +151,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed()) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/privateregistry/privateregistry_test.go b/tests/e2e/privateregistry/privateregistry_test.go index e7ab9187f870..e1d0c9a54afc 100644 --- a/tests/e2e/privateregistry/privateregistry_test.go +++ b/tests/e2e/privateregistry/privateregistry_test.go @@ -7,9 +7,11 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" ) // Valid nodeOS: @@ -32,11 +34,7 @@ func Test_E2EPrivateRegistry(t *testing.T) { RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -45,52 +43,41 @@ var _ = Describe("Verify Create", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + By("Fetching pod status") + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "620s", "10s").Should(Succeed()) }) It("Create new private registry", func() { - registry, err := e2e.RunCmdOnNode("docker run --init -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0]) + registry, err := tc.Servers[0].RunCmdOnNode("docker run --init -d -p 5000:5000 --restart=always --name registry registry:2 ") fmt.Println(registry) Expect(err).NotTo(HaveOccurred()) }) It("ensures registry is working", func() { - a, err := e2e.RunCmdOnNode("docker ps -a | grep registry\n", serverNodeNames[0]) + a, err := tc.Servers[0].RunCmdOnNode("docker ps -a | grep registry\n") fmt.Println(a) Expect(err).NotTo(HaveOccurred()) @@ -100,44 +87,44 @@ var _ = Describe("Verify Create", Ordered, func() { // NODEIP:5000 as a mirror. 
It("Should pull and image from dockerhub and send it to private registry", func() { cmd := "docker pull docker.io/library/nginx:1.27.3" - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) - nodeIP, err := e2e.FetchNodeExternalIP(serverNodeNames[0]) + nodeIP, err := tc.Servers[0].FetchNodeExternalIP() Expect(err).NotTo(HaveOccurred()) cmd = "docker tag docker.io/library/nginx:1.27.3 " + nodeIP + ":5000/docker-io-library/nginx:1.27.3" - _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) cmd = "docker push " + nodeIP + ":5000/docker-io-library/nginx:1.27.3" - _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) cmd = "docker image remove docker.io/library/nginx:1.27.3 " + nodeIP + ":5000/docker-io-library/nginx:1.27.3" - _, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), "failed: "+cmd) }) It("Should create and validate deployment with private registry on", func() { - res, err := e2e.RunCmdOnNode("kubectl create deployment my-webpage --image=my-registry.local/library/nginx:1.27.3", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("kubectl create deployment my-webpage --image=my-registry.local/library/nginx:1.27.3") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) - var pod e2e.Pod + var pod corev1.Pod Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) + pods, err := tests.ParsePods(tc.KubeConfigFile) for _, p := range pods { if strings.Contains(p.Name, "my-webpage") { pod = p } } g.Expect(err).NotTo(HaveOccurred()) - g.Expect(pod.Status).Should(Equal("Running")) + g.Expect(string(pod.Status.Phase)).Should(Equal("Running")) }, "60s", "5s").Should(Succeed()) - cmd := "curl " + pod.IP - Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).To(ContainSubstring("Welcome to nginx!")) + cmd := "curl " + pod.Status.PodIP + Expect(tc.Servers[0].RunCmdOnNode(cmd)).To(ContainSubstring("Welcome to nginx!")) }) }) @@ -150,17 +137,17 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed()) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { - r1, err := e2e.RunCmdOnNode("docker rm -f registry", serverNodeNames[0]) + r1, err := tc.Servers[0].RunCmdOnNode("docker rm -f registry") Expect(err).NotTo(HaveOccurred(), r1) - r2, err := e2e.RunCmdOnNode("kubectl delete deployment my-webpage", serverNodeNames[0]) + r2, err := tc.Servers[0].RunCmdOnNode("kubectl delete deployment my-webpage") Expect(err).NotTo(HaveOccurred(), r2) Expect(err).NotTo(HaveOccurred()) Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/rootless/rootless_test.go b/tests/e2e/rootless/rootless_test.go index 4a205934e3d5..a320c76fdf9f 100644 --- a/tests/e2e/rootless/rootless_test.go +++ b/tests/e2e/rootless/rootless_test.go @@ -4,9 +4,9 @@ import ( "flag" "fmt" "os" - "strings" "testing" + 
"github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -29,43 +29,40 @@ func Test_E2ERootlessStartupValidation(t *testing.T) { RunSpecs(t, "Startup Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string -) +var tc *e2e.TestConfig -func StartK3sCluster(nodes []string, serverYAML string) error { +func StartK3sCluster(nodes []e2e.VagrantNode, serverYAML string) error { for _, node := range nodes { resetCmd := "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" yamlCmd := fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML) startCmd := "systemctl --user restart k3s-rootless" - if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil { + if _, err := node.RunCmdOnNode(resetCmd); err != nil { return err } - if _, err := e2e.RunCmdOnNode(yamlCmd, node); err != nil { + if _, err := node.RunCmdOnNode(yamlCmd); err != nil { return err } - if _, err := RunCmdOnRootlesNode("systemctl --user daemon-reload", node); err != nil { + if _, err := RunCmdOnRootlessNode("systemctl --user daemon-reload", node.String()); err != nil { return err } - if _, err := RunCmdOnRootlesNode(startCmd, node); err != nil { + if _, err := RunCmdOnRootlessNode(startCmd, node.String()); err != nil { return err } } return nil } -func KillK3sCluster(nodes []string) error { +func KillK3sCluster(nodes []e2e.VagrantNode) error { for _, node := range nodes { - if _, err := RunCmdOnRootlesNode(`systemctl --user stop k3s-rootless`, node); err != nil { + if _, err := RunCmdOnRootlessNode(`systemctl --user stop k3s-rootless`, node.String()); err != nil { return err } - if _, err := RunCmdOnRootlesNode("k3s-killall.sh", node); err != nil { + if _, err := RunCmdOnRootlessNode("k3s-killall.sh", node.String()); err != nil { return err } - if _, err := RunCmdOnRootlesNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node); err != nil { + if _, err := RunCmdOnRootlessNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node.String()); err != nil { return err } } @@ -77,13 +74,13 @@ var _ = ReportAfterEach(e2e.GenReport) var _ = BeforeSuite(func() { var err error if *local { - serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, 1, 0) + tc, err = e2e.CreateLocalCluster(*nodeOS, 1, 0) } else { - serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, 1, 0) + tc, err = e2e.CreateCluster(*nodeOS, 1, 0) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) //Checks if system is using cgroup v2 - _, err = e2e.RunCmdOnNode("cat /sys/fs/cgroup/cgroup.controllers", serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode("cat /sys/fs/cgroup/cgroup.controllers") Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) }) @@ -91,40 +88,32 @@ var _ = BeforeSuite(func() { var _ = Describe("Various Startup Configurations", Ordered, func() { Context("Verify standard startup :", func() { It("Starts K3s with no issues", func() { - err := StartK3sCluster(serverNodeNames, "") + err := StartK3sCluster(tc.Servers, "") Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - kubeConfigFile, err = GenRootlessKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + kubeConfigFile, err := GenRootlessKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) + tc.KubeConfigFile = kubeConfigFile 
}) It("Checks node and pod status", func() { - fmt.Printf("\nFetching node status\n") + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, false) + _, _ = e2e.ParseNodes(tc.KubeConfigFile, false) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, false) + e2e.DumpPods(tc.KubeConfigFile) }) It("Returns pod metrics", func() { @@ -154,7 +143,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { }) It("Kills the cluster", func() { - err := KillK3sCluster(serverNodeNames) + err := KillK3sCluster(tc.Servers) Expect(err).NotTo(HaveOccurred()) }) }) @@ -168,12 +157,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, serverNodeNames)) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, tc.Servers)) } else { - Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed()) + Expect(e2e.GetCoverageReport(tc.Servers)).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/rootless/rootless_utils.go b/tests/e2e/rootless/rootless_utils.go index 296e0865666d..62f0bd860653 100644 --- a/tests/e2e/rootless/rootless_utils.go +++ b/tests/e2e/rootless/rootless_utils.go @@ -8,8 +8,8 @@ import ( "github.com/k3s-io/k3s/tests/e2e" ) -// RunCmdOnRootlesNode executes a command from within the given node as user vagrant -func RunCmdOnRootlesNode(cmd string, nodename string) (string, error) { +// RunCmdOnRootlessNode executes a command from within the given node as user vagrant +func RunCmdOnRootlessNode(cmd string, nodename string) (string, error) { injectEnv := "" if _, ok := os.LookupEnv("E2E_GOCOVER"); ok && strings.HasPrefix(cmd, "k3s") { injectEnv = "GOCOVERDIR=/tmp/k3scov " @@ -23,11 +23,12 @@ func RunCmdOnRootlesNode(cmd string, nodename string) (string, error) { } func GenRootlessKubeConfigFile(serverName string) (string, error) { - kubeConfig, err := RunCmdOnRootlesNode("cat /home/vagrant/.kube/k3s.yaml", serverName) + kubeConfig, err := RunCmdOnRootlessNode("cat /home/vagrant/.kube/k3s.yaml", serverName) if err != nil { return "", err } - nodeIP, err := e2e.FetchNodeExternalIP(serverName) + vNode := e2e.VagrantNode(serverName) + nodeIP, err := vNode.FetchNodeExternalIP() if err != nil { return "", err } diff --git a/tests/e2e/rotateca/rotateca_test.go b/tests/e2e/rotateca/rotateca_test.go index 3a6f2b0ca14f..ab438a95fa83 100644 --- a/tests/e2e/rotateca/rotateca_test.go +++ b/tests/e2e/rotateca/rotateca_test.go @@ -2,11 +2,10 @@ package rotateca import ( "flag" - "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -29,11 +28,7 @@ func Test_E2ECustomCARotation(t *testing.T) { RunSpecs(t, "Custom Certificate Rotation Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - agentNodeNames []string - serverNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -42,43 +37,32 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) It("Checks node and pod status", func() { - fmt.Printf("\nFetching node status\n") + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.ParseNodes(tc.KubeConfigFile, true) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Generates New CA Certificates", func() { @@ -88,46 +72,38 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() { "DATA_DIR=/opt/rancher/k3s /tmp/generate-custom-ca-certs.sh", } for _, cmd := range cmds { - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) } }) It("Rotates CA Certificates", func() { cmd := "k3s certificate rotate-ca --path=/opt/rancher/k3s/server" - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed()) }) It("Restarts K3s agents", func() { - Expect(e2e.RestartCluster(agentNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(tc.Agents)).To(Succeed()) }) It("Checks node and pod status", func() { Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "420s", "5s").Should(Succeed()) - Eventually(func(g Gomega) { - pods, err := 
e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) }) }) @@ -139,12 +115,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/s3/s3_test.go b/tests/e2e/s3/s3_test.go index 64066cf45572..4de5519d29fe 100644 --- a/tests/e2e/s3/s3_test.go +++ b/tests/e2e/s3/s3_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -31,11 +32,7 @@ func Test_E2ES3(t *testing.T) { RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -44,93 +41,79 @@ var _ = Describe("Verify Create", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 0) + tc, err = e2e.CreateLocalCluster(*nodeOS, 1, 0) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 0) + tc, err = e2e.CreateCluster(*nodeOS, 1, 0) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.ParseNodes(tc.KubeConfigFile, true) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = 
e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("ensures s3 mock is working", func() { - res, err := e2e.RunCmdOnNode("docker ps -a | grep mock\n", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("docker ps -a | grep mock\n") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) }) It("save s3 snapshot using CLI", func() { - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save "+ - "--etcd-s3-insecure=true "+ - "--etcd-s3-bucket=test-bucket "+ - "--etcd-s3-folder=test-folder "+ - "--etcd-s3-endpoint=localhost:9090 "+ - "--etcd-s3-skip-ssl-verify=true "+ - "--etcd-s3-access-key=test ", - serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save " + + "--etcd-s3-insecure=true " + + "--etcd-s3-bucket=test-bucket " + + "--etcd-s3-folder=test-folder " + + "--etcd-s3-endpoint=localhost:9090 " + + "--etcd-s3-skip-ssl-verify=true " + + "--etcd-s3-access-key=test ") Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("Snapshot on-demand-server-0")) }) It("creates s3 config secret", func() { - res, err := e2e.RunCmdOnNode("k3s kubectl create secret generic k3s-etcd-s3-config --namespace=kube-system "+ - "--from-literal=etcd-s3-insecure=true "+ - "--from-literal=etcd-s3-bucket=test-bucket "+ - "--from-literal=etcd-s3-folder=test-folder "+ - "--from-literal=etcd-s3-endpoint=localhost:9090 "+ - "--from-literal=etcd-s3-skip-ssl-verify=true "+ - "--from-literal=etcd-s3-access-key=test ", - serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s kubectl create secret generic k3s-etcd-s3-config --namespace=kube-system " + + "--from-literal=etcd-s3-insecure=true " + + "--from-literal=etcd-s3-bucket=test-bucket " + + "--from-literal=etcd-s3-folder=test-folder " + + "--from-literal=etcd-s3-endpoint=localhost:9090 " + + "--from-literal=etcd-s3-skip-ssl-verify=true " + + "--from-literal=etcd-s3-access-key=test ") Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("secret/k3s-etcd-s3-config created")) }) It("save s3 snapshot using secret", func() { - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save") Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("Snapshot on-demand-server-0")) }) It("lists saved s3 snapshot", func() { - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot list") Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("file:///var/lib/rancher/k3s/server/db/snapshots/on-demand-server-0")) Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/on-demand-server-0")) }) It("save 3 more s3 snapshots", func() { for _, i := range []string{"1", "2", "3"} { - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save --name special-"+i, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save --name special-" + i) Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("Snapshot special-" + i + "-server-0")) } }) It("lists saved s3 snapshot", func() { - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot list") Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/on-demand-server-0")) Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/special-1-server-0")) @@ -138,25 +121,25 @@ var _ = Describe("Verify Create", Ordered, func() { 
Expect(res).To(ContainSubstring("s3://test-bucket/test-folder/special-3-server-0")) }) It("delete first on-demand s3 snapshot", func() { - _, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot ls >> ./snapshotname.txt", serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("sudo k3s etcd-snapshot ls >> ./snapshotname.txt") Expect(err).NotTo(HaveOccurred()) - snapshotName, err := e2e.RunCmdOnNode("grep -Eo 'on-demand-server-0-([0-9]+)' ./snapshotname.txt | head -1", serverNodeNames[0]) + snapshotName, err := tc.Servers[0].RunCmdOnNode("grep -Eo 'on-demand-server-0-([0-9]+)' ./snapshotname.txt | head -1") Expect(err).NotTo(HaveOccurred()) - res, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot delete "+snapshotName, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("sudo k3s etcd-snapshot delete " + snapshotName) Expect(err).NotTo(HaveOccurred()) Expect(res).To(ContainSubstring("Snapshot " + strings.TrimSpace(snapshotName) + " deleted")) }) It("prunes s3 snapshots", func() { - _, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save") Expect(err).NotTo(HaveOccurred()) time.Sleep(time.Second) - _, err = e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot save") Expect(err).NotTo(HaveOccurred()) time.Sleep(time.Second) - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot prune", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot prune") Expect(err).NotTo(HaveOccurred()) // There should now be 4 on-demand snapshots - 2 local, and 2 on s3 - res, err = e2e.RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep on-demand | wc -l", serverNodeNames[0]) + res, err = tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep on-demand | wc -l") Expect(err).NotTo(HaveOccurred()) Expect(strings.TrimSpace(res)).To(Equal("4")) }) @@ -164,7 +147,7 @@ var _ = Describe("Verify Create", Ordered, func() { // Wait until the retention works with 3 minutes fmt.Printf("\nWaiting 3 minutes until retention works\n") time.Sleep(3 * time.Minute) - res, err := e2e.RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep etcd-snapshot | wc -l", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s etcd-snapshot ls 2>/dev/null | grep etcd-snapshot | wc -l") Expect(err).NotTo(HaveOccurred()) Expect(strings.TrimSpace(res)).To(Equal("4")) }) @@ -178,12 +161,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed()) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/secretsencryption/secretsencryption_test.go b/tests/e2e/secretsencryption/secretsencryption_test.go index 463d52853584..266c6fff2629 100644 --- a/tests/e2e/secretsencryption/secretsencryption_test.go +++ b/tests/e2e/secretsencryption/secretsencryption_test.go @@ -2,11 +2,10 @@ package secretsencryption import ( "flag" - "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -32,10 +31,7 @@ func Test_E2ESecretsEncryption(t *testing.T) { RunSpecs(t, "Secrets Encryption Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -44,53 +40,44 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0) } else { - serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + tc.Hardened = *hardened + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) Expect(err).NotTo(HaveOccurred()) }) It("Checks node and pod status", func() { - fmt.Printf("\nFetching node status\n") + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + e2e.ParseNodes(tc.KubeConfigFile, true) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Deploys several secrets", func() { - _, err := e2e.DeployWorkload("secrets.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("secrets.yaml") Expect(err).NotTo(HaveOccurred(), "Secrets not deployed") }) It("Verifies encryption start stage", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + for _, node := range tc.Servers { + res, err := node.RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) Expect(res).Should(ContainSubstring("Current Rotation Stage: start")) @@ -100,17 +87,12 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { It("Rotates the Secrets-Encryption Keys", func() { cmd := "k3s secrets-encrypt rotate-keys" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - // Before we fail on error, get the server logs which actually contain the error - var slogs string - if err != nil { - slogs, _ = e2e.RunCmdOnNode("journalctl -u k3s -n 10", serverNodeNames[0]) - } - Expect(err).NotTo(HaveOccurred(), res+slogs) - for i, nodeName := range serverNodeNames { + res, err := tc.Servers[0].RunCmdOnNode(cmd) + Expect(err).NotTo(HaveOccurred(), res) + for i, node := range tc.Servers { Eventually(func(g Gomega) { cmd := "k3s secrets-encrypt status" - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, 
err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred(), res) g.Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match")) if i == 0 { @@ -123,14 +105,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Verifies reencryption_finished stage", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred()) g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) g.Expect(res).Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) @@ -144,17 +126,17 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Disabling Secrets-Encryption", func() { It("Disables encryption", func() { cmd := "k3s secrets-encrypt disable" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode(cmd) }, "240s", "10s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) - for i, nodeName := range serverNodeNames { + for i, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred(), res) if i == 0 { g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled")) @@ -166,14 +148,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed()) }) It("Verifies encryption disabled on all nodes", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled")) + g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Disabled")) }, "420s", "2s").Should(Succeed()) } }) @@ -183,17 +165,17 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Enabling Secrets-Encryption", func() { It("Enables encryption", func() { cmd := "k3s secrets-encrypt enable" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode(cmd) }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) - for i, nodeName := range serverNodeNames { + for i, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred(), res) if i == 0 { g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) @@ -205,14 +187,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, 
func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed()) }) It("Verifies encryption enabled on all nodes", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled")) + g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Enabled")) }, "420s", "2s").Should(Succeed()) } }) @@ -227,12 +209,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, serverNodeNames)) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, tc.Servers)) } else { - Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed()) + Expect(e2e.GetCoverageReport(tc.Servers)).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/secretsencryption_old/secretsencryption_test.go b/tests/e2e/secretsencryption_old/secretsencryption_test.go index c7b1f3c98870..c6bec5b6a5dd 100644 --- a/tests/e2e/secretsencryption_old/secretsencryption_test.go +++ b/tests/e2e/secretsencryption_old/secretsencryption_test.go @@ -2,11 +2,10 @@ package secretsencryption import ( "flag" - "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -29,10 +28,7 @@ func Test_E2ESecretsEncryptionOld(t *testing.T) { RunSpecs(t, "Secrets Encryption Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -41,53 +37,43 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0) } else { - serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + tc.Hardened = *hardened + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) It("Checks node and pod status", func() { - fmt.Printf("\nFetching node status\n") + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + 
Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Deploys several secrets", func() { - _, err := e2e.DeployWorkload("secrets.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("secrets.yaml") Expect(err).NotTo(HaveOccurred(), "Secrets not deployed") }) It("Verifies encryption start stage", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + for _, node := range tc.Servers { + res, err := node.RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) Expect(res).Should(ContainSubstring("Current Rotation Stage: start")) @@ -97,11 +83,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { It("Prepares for Secrets-Encryption Rotation", func() { cmd := "k3s secrets-encrypt prepare" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) - for i, nodeName := range serverNodeNames { + for i, node := range tc.Servers { cmd := "k3s secrets-encrypt status" - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match")) if i == 0 { @@ -113,37 +99,29 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Checks node and pod status", func() { Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "420s", "5s").Should(Succeed()) - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "360s", "5s").Should(Succeed()) + e2e.DumpPods(tc.KubeConfigFile) }) It("Verifies encryption prepare stage", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred()) g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) g.Expect(res).Should(ContainSubstring("Current Rotation Stage: prepare")) @@ -154,12 +132,12 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { It("Rotates the Secrets-Encryption Keys", func() { cmd := "k3s secrets-encrypt rotate" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) - for i, nodeName := range serverNodeNames { + for i, node := range tc.Servers { 
Eventually(func(g Gomega) { cmd := "k3s secrets-encrypt status" - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred(), res) g.Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match")) if i == 0 { @@ -172,14 +150,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Verifies encryption rotate stage", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred()) g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) g.Expect(res).Should(ContainSubstring("Current Rotation Stage: rotate")) @@ -190,16 +168,16 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { It("Reencrypts the Secrets-Encryption Keys", func() { cmd := "k3s secrets-encrypt reencrypt" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode(cmd) }, "240s", "10s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) - for _, nodeName := range serverNodeNames[1:] { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + for _, node := range tc.Servers[1:] { + res, err := node.RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match")) Expect(res).Should(ContainSubstring("Current Rotation Stage: rotate")) @@ -207,14 +185,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s Servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Verifies Encryption Reencrypt Stage", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred()) g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) g.Expect(res).Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) @@ -227,21 +205,21 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Disabling Secrets-Encryption", func() { It("Disables encryption", func() { cmd := "k3s secrets-encrypt disable" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt reencrypt -f --skip" - res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode(cmd) }, "240s", "10s").Should(ContainSubstring("Current Rotation Stage: 
reencrypt_finished")) - for i, nodeName := range serverNodeNames { + for i, node := range tc.Servers { Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) + res, err := node.RunCmdOnNode(cmd) g.Expect(err).NotTo(HaveOccurred(), res) if i == 0 { g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled")) @@ -253,14 +231,14 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed()) }) It("Verifies encryption disabled on all nodes", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled")) + g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Disabled")) }, "420s", "2s").Should(Succeed()) } }) @@ -270,28 +248,28 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Enabling Secrets-Encryption", func() { It("Enables encryption", func() { cmd := "k3s secrets-encrypt enable" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt reencrypt -f --skip" - res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + res, err = tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred(), res) cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode(cmd) }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(tc.Servers)).To(Succeed()) }) It("Verifies encryption enabled on all nodes", func() { cmd := "k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func(g Gomega) { - g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled")) + g.Expect(node.RunCmdOnNode(cmd)).Should(ContainSubstring("Encryption Status: Enabled")) }, "420s", "2s").Should(Succeed()) } @@ -307,10 +285,10 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if !failed { - Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed()) + Expect(e2e.GetCoverageReport(tc.Servers)).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/snapshotrestore/Vagrantfile b/tests/e2e/snapshotrestore/Vagrantfile deleted file mode 100644 index 6e9cac5f9613..000000000000 --- a/tests/e2e/snapshotrestore/Vagrantfile +++ /dev/null @@ -1,106 +0,0 @@ -ENV['VAGRANT_NO_PARALLEL'] = 'no' -NODE_ROLES = (ENV['E2E_NODE_ROLES'] || - ["server-0", "server-1", "server-2", "agent-0", "agent-1"]) -NODE_BOXES = (ENV['E2E_NODE_BOXES'] || - ['bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04']) -GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master") -RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "") -GOCOVER = (ENV['E2E_GOCOVER'] || "") -NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i -NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i -# 
Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks -NETWORK_PREFIX = "10.10.10" -install_type = "" - -def provision(vm, role, role_num, node_num) - vm.box = NODE_BOXES[node_num] - vm.hostname = role - # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32 - node_ip = "#{NETWORK_PREFIX}.#{100+node_num}" - vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - - scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts" - vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb" - load vagrant_defaults - - defaultOSConfigure(vm) - addCoverageDir(vm, role, GOCOVER) - install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) - - vm.provision "shell", inline: "ping -c 2 k3s.io" - - if role.include?("server") && role_num == 0 - vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s| - k3s.args = "server " - k3s.config = <<~YAML - token: vagrant - cluster-init: true - node-external-ip: #{NETWORK_PREFIX}.100 - flannel-iface: eth1 - tls-san: #{NETWORK_PREFIX}.100.nip.io - YAML - k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}] - k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 - end - - elsif role.include?("server") && role_num != 0 - vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s| - k3s.args = "server" - k3s.config = <<~YAML - server: "https://#{NETWORK_PREFIX}.100:6443" - token: vagrant - node-external-ip: #{node_ip} - flannel-iface: eth1 - YAML - k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}] - k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 - end - end - - if role.include?("agent") - vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s| - k3s.args = "agent" - k3s.config = <<~YAML - server: "https://#{NETWORK_PREFIX}.100:6443" - token: vagrant - node-external-ip: #{node_ip} - flannel-iface: eth1 - YAML - k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}] - k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 - end - end - if vm.box.to_s.include?("microos") - vm.provision 'k3s-reload', type: 'reload', run: 'once' - end - # This step does not run by default and is designed to be called by higher level tools -end - -Vagrant.configure("2") do |config| - config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"] - # Default provider is libvirt, virtualbox is only provided as a backup - config.vm.provider "libvirt" do |v| - v.cpus = NODE_CPUS - v.memory = NODE_MEMORY - # We replicate the default prefix, but add a timestamp to enable parallel runs and cleanup of old VMs - v.default_prefix = File.basename(Dir.getwd) + "_" + Time.now.to_i.to_s + "_" - end - config.vm.provider "virtualbox" do |v| - v.cpus = NODE_CPUS - v.memory = NODE_MEMORY - end - - if NODE_ROLES.kind_of?(String) - NODE_ROLES = NODE_ROLES.split(" ", -1) - end - if NODE_BOXES.kind_of?(String) - NODE_BOXES = NODE_BOXES.split(" ", -1) - end - - NODE_ROLES.each_with_index do |role, i| - role_num = role.split("-", -1).pop.to_i - config.vm.define role do |node| - provision(node.vm, role, role_num, i) - end - end -end diff --git a/tests/e2e/snapshotrestore/snapshotrestore_test.go b/tests/e2e/snapshotrestore/snapshotrestore_test.go deleted file mode 100644 index 0f2fb8b9bd26..000000000000 --- a/tests/e2e/snapshotrestore/snapshotrestore_test.go +++ /dev/null @@ -1,318 +0,0 @@ -package snapshotrestore - -import ( - "flag" - "fmt" - 
"os" - "strings" - "testing" - "time" - - "github.com/k3s-io/k3s/tests/e2e" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -// Valid nodeOS: -// bento/ubuntu-24.04, opensuse/Leap-15.6.x86_64 -// eurolinux-vagrant/rocky-8, eurolinux-vagrant/rocky-9, - -var nodeOS = flag.String("nodeOS", "bento/ubuntu-24.04", "VM operating system") -var serverCount = flag.Int("serverCount", 3, "number of server nodes") -var agentCount = flag.Int("agentCount", 1, "number of agent nodes") -var hardened = flag.Bool("hardened", false, "true or false") -var ci = flag.Bool("ci", false, "running on CI") -var local = flag.Bool("local", false, "deploy a locally built K3s binary") - -// Environment Variables Info: -// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master) - -func Test_E2ESnapshotRestore(t *testing.T) { - RegisterFailHandler(Fail) - flag.Parse() - suiteConfig, reporterConfig := GinkgoConfiguration() - RunSpecs(t, "SnapshotRestore Test Suite", suiteConfig, reporterConfig) -} - -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string - snapshotname string -) - -var _ = ReportAfterEach(e2e.GenReport) - -var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { - Context("Cluster creates snapshots and workloads:", func() { - It("Starts up with no issues", func() { - var err error - if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) - } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) - } - Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) - } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) - }) - - It("Verifies test workload before snapshot is created", func() { - res, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res) - - Eventually(func(g Gomega) { - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile - res, err := e2e.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: %q result: %s", cmd, res) - }, "240s", "5s").Should(Succeed()) - }) - - It("Verifies Snapshot is created", func() { - Eventually(func(g Gomega) { - cmd := "k3s etcd-snapshot save" - _, err := e2e.RunCmdOnNode(cmd, "server-0") - 
g.Expect(err).NotTo(HaveOccurred()) - cmd = "ls /var/lib/rancher/k3s/server/db/snapshots/" - snapshotname, err = e2e.RunCmdOnNode(cmd, "server-0") - g.Expect(err).NotTo(HaveOccurred()) - fmt.Println("Snapshot Name", snapshotname) - g.Expect(snapshotname).Should(ContainSubstring("on-demand-server-0")) - }, "420s", "10s").Should(Succeed()) - }) - - It("Verifies another test workload after snapshot is created", func() { - _, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - Eventually(func(g Gomega) { - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile - res, err := e2e.RunCommand(cmd) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") - }, "240s", "5s").Should(Succeed()) - }) - - }) - - Context("Cluster is reset normally", func() { - It("Resets the cluster", func() { - for _, nodeName := range serverNodeNames { - cmd := "systemctl stop k3s" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - if nodeName != serverNodeNames[0] { - cmd = "k3s-killall.sh" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - } - } - - cmd := "k3s server --cluster-reset" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) - - cmd = "systemctl start k3s" - Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred()) - }) - - It("Checks that other servers are not ready", func() { - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - if strings.Contains(node.Name, serverNodeNames[0]) || strings.Contains(node.Name, "agent-") { - g.Expect(node.Status).Should(Equal("Ready")) - } else { - g.Expect(node.Status).Should(Equal("NotReady")) - } - } - }, "240s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - }) - - It("Rejoins other servers to cluster", func() { - // We must remove the db directory on the other servers before restarting k3s - // otherwise the nodes may join the old cluster - for _, nodeName := range serverNodeNames[1:] { - cmd := "rm -rf /var/lib/rancher/k3s/server/db" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - } - - for _, nodeName := range serverNodeNames[1:] { - cmd := "systemctl start k3s" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - time.Sleep(20 * time.Second) //Stagger the restarts for etcd leaners - } - }) - - It("Checks that all nodes and pods are ready", func() { - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - nodeJournal, _ := e2e.GetJournalLogs(node.Name) - g.Expect(node.Status).Should(Equal("Ready"), nodeJournal) - } - }, "420s", "5s").Should(Succeed()) - - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), 
pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "420s", "5s").Should(Succeed()) - }) - It("Verifies that workload1 and workload1 exist", func() { - cmd := "kubectl get pods --kubeconfig=" + kubeConfigFile - res, err := e2e.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("test-clusterip")) - Expect(res).Should(ContainSubstring("test-nodeport")) - }) - - }) - - Context("Cluster restores from snapshot", func() { - It("Restores the snapshot", func() { - //Stop k3s on all nodes - for _, nodeName := range serverNodeNames { - cmd := "systemctl stop k3s" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - if nodeName != serverNodeNames[0] { - cmd = "k3s-killall.sh" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - } - } - //Restores from snapshot on server-0 - cmd := "k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) - - cmd = "systemctl start k3s" - Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred()) - - }) - - It("Checks that other servers are not ready", func() { - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - if strings.Contains(node.Name, serverNodeNames[0]) || strings.Contains(node.Name, "agent-") { - g.Expect(node.Status).Should(Equal("Ready")) - } else { - g.Expect(node.Status).Should(Equal("NotReady")) - } - } - }, "240s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - }) - - It("Rejoins other servers to cluster", func() { - // We must remove the db directory on the other servers before restarting k3s - // otherwise the nodes may join the old cluster - for _, nodeName := range serverNodeNames[1:] { - cmd := "rm -rf /var/lib/rancher/k3s/server/db" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - } - - for _, nodeName := range serverNodeNames[1:] { - cmd := "systemctl start k3s" - Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) - } - }) - - It("Checks that all nodes and pods are ready", func() { - //Verifies node is up and pods running - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) - }) - - It("Verifies that workload1 exists and workload2 does not", func() { - cmd := "kubectl get pods --kubeconfig=" + kubeConfigFile - res, err := e2e.RunCommand(cmd) - Expect(err).NotTo(HaveOccurred()) - 
Expect(res).Should(ContainSubstring("test-clusterip")) - Expect(res).ShouldNot(ContainSubstring("test-nodeport")) - }) - }) -}) - -var failed bool -var _ = AfterEach(func() { - failed = failed || CurrentSpecReport().Failed() -}) - -var _ = AfterSuite(func() { - if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) - } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) - } - if !failed || *ci { - Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) - } -}) diff --git a/tests/e2e/splitserver/splitserver_test.go b/tests/e2e/splitserver/splitserver_test.go index 642dbc1592e3..4381cffaae1c 100644 --- a/tests/e2e/splitserver/splitserver_test.go +++ b/tests/e2e/splitserver/splitserver_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -30,28 +31,28 @@ var hardened = flag.Bool("hardened", false, "true or false") // createSplitCluster creates a split server cluster with the given nodeOS, etcdCount, controlPlaneCount, and agentCount. // It duplicates and merges functionality found in the e2e.CreateCluster and e2e.CreateLocalCluster functions. -func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount int, local bool) ([]string, []string, []string, error) { - etcdNodeNames := make([]string, etcdCount) - cpNodeNames := make([]string, controlPlaneCount) - agentNodeNames := make([]string, agentCount) +func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount int, local bool) ([]e2e.VagrantNode, []e2e.VagrantNode, []e2e.VagrantNode, error) { + etcdNodes := make([]e2e.VagrantNode, etcdCount) + cpNodes := make([]e2e.VagrantNode, controlPlaneCount) + agentNodes := make([]e2e.VagrantNode, agentCount) for i := 0; i < etcdCount; i++ { - etcdNodeNames[i] = "server-etcd-" + strconv.Itoa(i) + etcdNodes[i] = e2e.VagrantNode("server-etcd-" + strconv.Itoa(i)) } for i := 0; i < controlPlaneCount; i++ { - cpNodeNames[i] = "server-cp-" + strconv.Itoa(i) + cpNodes[i] = e2e.VagrantNode("server-cp-" + strconv.Itoa(i)) } for i := 0; i < agentCount; i++ { - agentNodeNames[i] = "agent-" + strconv.Itoa(i) + agentNodes[i] = e2e.VagrantNode("agent-" + strconv.Itoa(i)) } - nodeRoles := strings.Join(etcdNodeNames, " ") + " " + strings.Join(cpNodeNames, " ") + " " + strings.Join(agentNodeNames, " ") + nodeRoles := strings.Join(e2e.VagrantSlice(etcdNodes), " ") + " " + strings.Join(e2e.VagrantSlice(cpNodes), " ") + " " + strings.Join(e2e.VagrantSlice(agentNodes), " ") nodeRoles = strings.TrimSpace(nodeRoles) nodeBoxes := strings.Repeat(nodeOS+" ", etcdCount+controlPlaneCount+agentCount) nodeBoxes = strings.TrimSpace(nodeBoxes) - allNodes := append(etcdNodeNames, cpNodeNames...) - allNodes = append(allNodes, agentNodeNames...) + allNodeNames := append(e2e.VagrantSlice(etcdNodes), e2e.VagrantSlice(cpNodes)...) + allNodeNames = append(allNodeNames, e2e.VagrantSlice(agentNodes)...) var testOptions string for _, env := range os.Environ() { @@ -62,15 +63,15 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount // Provision the first etcd node. In GitHub Actions, this also imports the VM image into libvirt, which // takes time and can cause the next vagrant up to fail if it is not given enough time to complete. 
- cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodeNames[0]) + cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodes[0].String()) fmt.Println(cmd) if _, err := e2e.RunCommand(cmd); err != nil { - return etcdNodeNames, cpNodeNames, agentNodeNames, err + return etcdNodes, cpNodes, agentNodes, err } // Bring up the rest of the nodes in parallel errg, _ := errgroup.WithContext(context.Background()) - for _, node := range allNodes[1:] { + for _, node := range allNodeNames[1:] { cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, node) errg.Go(func() error { _, err := e2e.RunCommand(cmd) @@ -80,24 +81,25 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount time.Sleep(10 * time.Second) } if err := errg.Wait(); err != nil { - return etcdNodeNames, cpNodeNames, agentNodeNames, err + return etcdNodes, cpNodes, agentNodes, err } if local { testOptions += " E2E_RELEASE_VERSION=skip" - for _, node := range allNodes { + for _, node := range allNodeNames { cmd := fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node, node) if _, err := e2e.RunCommand(cmd); err != nil { - return etcdNodeNames, cpNodeNames, agentNodeNames, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) + return etcdNodes, cpNodes, agentNodes, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) } - if _, err := e2e.RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil { - return etcdNodeNames, cpNodeNames, agentNodeNames, err + cmd = fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant ssh %s -c "sudo mv /tmp/k3s /usr/local/bin/"`, node, node) + if _, err := e2e.RunCommand(cmd); err != nil { + return etcdNodes, cpNodes, agentNodes, err } } } // Install K3s on all nodes in parallel errg, _ = errgroup.WithContext(context.Background()) - for _, node := range allNodes { + for _, node := range allNodeNames { cmd = fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, testOptions, node) errg.Go(func() error { _, err := e2e.RunCommand(cmd) @@ -107,9 +109,9 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount time.Sleep(10 * time.Second) } if err := errg.Wait(); err != nil { - return etcdNodeNames, cpNodeNames, agentNodeNames, err + return etcdNodes, cpNodes, agentNodes, err } - return etcdNodeNames, cpNodeNames, agentNodeNames, nil + return etcdNodes, cpNodes, agentNodes, nil } func Test_E2ESplitServer(t *testing.T) { @@ -120,10 +122,10 @@ func Test_E2ESplitServer(t *testing.T) { } var ( - kubeConfigFile string - etcdNodeNames []string - cpNodeNames []string - agentNodeNames []string + tc *e2e.TestConfig // We don't use the Server and Agents from this + etcdNodes []e2e.VagrantNode + cpNodes []e2e.VagrantNode + agentNodes []e2e.VagrantNode ) var _ = ReportAfterEach(e2e.GenReport) @@ -132,72 +134,66 @@ var _ = Describe("Verify Create", Ordered, func() { Context("Cluster :", func() { It("Starts up with no issues", func() { var err error - etcdNodeNames, cpNodeNames, agentNodeNames, err = createSplitCluster(*nodeOS, *etcdCount, *controlPlaneCount, *agentCount, *local) + etcdNodes, cpNodes, agentNodes, err = createSplitCluster(*nodeOS, *etcdCount, *controlPlaneCount, *agentCount, *local) Expect(err).NotTo(HaveOccurred(), 
e2e.GetVagrantLog(err)) fmt.Println("CLUSTER CONFIG") fmt.Println("OS:", *nodeOS) - fmt.Println("Etcd Server Nodes:", etcdNodeNames) - fmt.Println("Control Plane Server Nodes:", cpNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(cpNodeNames[0]) + fmt.Println("Etcd Server Nodes:", etcdNodes) + fmt.Println("Control Plane Server Nodes:", cpNodes) + fmt.Println("Agent Nodes:", agentNodes) + kubeConfigFile, err := e2e.GenKubeConfigFile(cpNodes[0].String()) + tc = &e2e.TestConfig{ + KubeConfigFile: kubeConfigFile, + Hardened: *hardened, + } Expect(err).NotTo(HaveOccurred()) }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Verifies ClusterIP Service", func() { - _, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("clusterip.yaml") Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) - clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false) + clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false) cmd = "curl -L --insecure http://" + clusterip + "/name.html" - for _, nodeName := range cpNodeNames { + for _, node := range cpNodes { Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, nodeName) + return node.RunCmdOnNode(cmd) }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) } }) It("Verifies NodePort Service", func() { - _, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("nodeport.yaml") Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - for _, nodeName := range cpNodeNames { - nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName) - cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" + for _, node := range cpNodes { + nodeExternalIP, _ := node.FetchNodeExternalIP() + cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" nodeport, err := 
e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) - cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") @@ -210,17 +206,17 @@ var _ = Describe("Verify Create", Ordered, func() { }) It("Verifies LoadBalancer Service", func() { - _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("loadbalancer.yaml") Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed") - for _, nodeName := range cpNodeNames { - ip, _ := e2e.FetchNodeExternalIP(nodeName) + for _, node := range cpNodes { + ip, _ := node.FetchNodeExternalIP() - cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" + cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" port, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) - cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd) @@ -233,11 +229,11 @@ var _ = Describe("Verify Create", Ordered, func() { }) It("Verifies Ingress", func() { - _, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("ingress.yaml") Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - for _, nodeName := range cpNodeNames { - ip, _ := e2e.FetchNodeExternalIP(nodeName) + for _, node := range cpNodes { + ip, _ := node.FetchNodeExternalIP() cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html" Eventually(func() (string, error) { return e2e.RunCommand(cmd) @@ -246,30 +242,26 @@ var _ = Describe("Verify Create", Ordered, func() { }) It("Verifies Daemonset", func() { - _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("daemonset.yaml") Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") Eventually(func(g Gomega) { - pods, _ := e2e.ParsePods(kubeConfigFile, false) - count := e2e.CountOfStringInSlice("test-daemonset", pods) - fmt.Println("POD COUNT") - fmt.Println(count) - fmt.Println("CP COUNT") - fmt.Println(len(cpNodeNames)) - g.Expect(len(cpNodeNames)).Should((Equal(count)), "Daemonset pod count does not match cp node count") + count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cpNodes).To(HaveLen(count), "Daemonset pod count does not match cp node count") }, "240s", "10s").Should(Succeed()) }) It("Verifies dns access", func() { - _, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("dnsutils.yaml") Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") - cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods dnsutils --kubeconfig=" + 
tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("dnsutils"), "failed cmd: "+cmd) - cmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" + cmd = "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"), "failed cmd: "+cmd) @@ -283,8 +275,8 @@ var _ = AfterEach(func() { }) var _ = AfterSuite(func() { - allNodes := append(cpNodeNames, etcdNodeNames...) - allNodes = append(allNodes, agentNodeNames...) + allNodes := append(cpNodes, etcdNodes...) + allNodes = append(allNodes, agentNodes...) if failed { AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, allNodes)) } else { @@ -292,6 +284,6 @@ var _ = AfterSuite(func() { } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go index f36181fffd27..392dc1e7aae6 100644 --- a/tests/e2e/startup/startup_test.go +++ b/tests/e2e/startup/startup_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -32,19 +33,15 @@ func Test_E2EStartupValidation(t *testing.T) { RunSpecs(t, "Startup Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig -func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error { +func StartK3sCluster(nodes []e2e.VagrantNode, serverYAML string, agentYAML string) error { for _, node := range nodes { var yamlCmd string var resetCmd string var startCmd string - if strings.Contains(node, "server") { + if strings.Contains(node.String(), "server") { resetCmd = "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML) startCmd = "systemctl start k3s" @@ -53,32 +50,32 @@ func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML) startCmd = "systemctl start k3s-agent" } - if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil { + if _, err := node.RunCmdOnNode(resetCmd); err != nil { return err } - if _, err := e2e.RunCmdOnNode(yamlCmd, node); err != nil { + if _, err := node.RunCmdOnNode(yamlCmd); err != nil { return err } - if _, err := e2e.RunCmdOnNode(startCmd, node); err != nil { + if _, err := node.RunCmdOnNode(startCmd); err != nil { return &e2e.NodeError{Node: node, Cmd: startCmd, Err: err} } } return nil } -func KillK3sCluster(nodes []string) error { +func KillK3sCluster(nodes []e2e.VagrantNode) error { for _, node := range nodes { - if _, err := e2e.RunCmdOnNode("k3s-killall.sh", node); err != nil { + if _, err := node.RunCmdOnNode("k3s-killall.sh"); err != nil { return err } - if _, err := e2e.RunCmdOnNode("journalctl --flush --sync --rotate --vacuum-size=1", node); err != nil { + if _, err := node.RunCmdOnNode("journalctl --flush --sync --rotate --vacuum-size=1"); err != nil { return err } - if _, err := e2e.RunCmdOnNode("rm -rf 
/etc/rancher/k3s/config.yaml.d", node); err != nil { + if _, err := node.RunCmdOnNode("rm -rf /etc/rancher/k3s/config.yaml.d"); err != nil { return err } - if strings.Contains(node, "server") { - if _, err := e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", node); err != nil { + if strings.Contains(node.String(), "server") { + if _, err := node.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db"); err != nil { return err } } @@ -91,9 +88,9 @@ var _ = ReportAfterEach(e2e.GenReport) var _ = BeforeSuite(func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1) + tc, err = e2e.CreateLocalCluster(*nodeOS, 1, 1) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1) + tc, err = e2e.CreateCluster(*nodeOS, 1, 1) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) }) @@ -101,48 +98,36 @@ var _ = BeforeSuite(func() { var _ = Describe("Various Startup Configurations", Ordered, func() { Context("Verify dedicated supervisor port", func() { It("Starts K3s with no issues", func() { - for _, node := range agentNodeNames { + for _, node := range tc.Agents { cmd := "mkdir -p /etc/rancher/k3s/config.yaml.d; grep -F server: /etc/rancher/k3s/config.yaml | sed s/6443/9345/ > /tmp/99-server.yaml; sudo mv /tmp/99-server.yaml /etc/rancher/k3s/config.yaml.d/" - res, err := e2e.RunCmdOnNode(cmd, node) + res, err := node.RunCmdOnNode(cmd) By("checking command results: " + res) Expect(err).NotTo(HaveOccurred()) } supervisorPortYAML := "supervisor-port: 9345\napiserver-port: 6443\napiserver-bind-address: 0.0.0.0\ndisable: traefik\nnode-taint: node-role.kubernetes.io/control-plane:NoExecute" - err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), supervisorPortYAML, "") + err := StartK3sCluster(append(tc.Servers, tc.Agents...), supervisorPortYAML, "") Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS:" + *nodeOS) + By(tc.Status()) + tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) }) It("Checks node and pod status", func() { fmt.Printf("\nFetching node status\n") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Returns pod metrics", func() { @@ -161,7 +146,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { It("Runs an interactive command a pod", func() { cmd := "kubectl run busybox --rm -it --restart=Never 
--image=rancher/mirrored-library-busybox:1.36.1 -- uname -a" - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) }) @@ -172,136 +157,103 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { }) It("Kills the cluster", func() { - err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + err := KillK3sCluster(append(tc.Servers, tc.Agents...)) Expect(err).NotTo(HaveOccurred()) }) }) - Context("Verify CRI-Dockerd :", func() { + Context("Verify CRI-Dockerd", func() { It("Starts K3s with no issues", func() { dockerYAML := "docker: true" - err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), dockerYAML, dockerYAML) + err := StartK3sCluster(append(tc.Servers, tc.Agents...), dockerYAML, dockerYAML) Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS:" + *nodeOS) + By(tc.Status()) + tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) }) It("Checks node and pod status", func() { fmt.Printf("\nFetching node status\n") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Kills the cluster", func() { - err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + err := KillK3sCluster(append(tc.Servers, tc.Agents...)) Expect(err).NotTo(HaveOccurred()) }) }) Context("Verify prefer-bundled-bin flag", func() { It("Starts K3s with no issues", func() { preferBundledYAML := "prefer-bundled-bin: true" - err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), preferBundledYAML, preferBundledYAML) + err := StartK3sCluster(append(tc.Servers, tc.Agents...), preferBundledYAML, preferBundledYAML) Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS:" + *nodeOS) + By(tc.Status()) + tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) }) It("Checks node and pod status", func() { fmt.Printf("\nFetching node status\n") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { 
g.Expect(node.Status).Should(Equal("Ready")) } }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Kills the cluster", func() { - err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + err := KillK3sCluster(append(tc.Servers, tc.Agents...)) Expect(err).NotTo(HaveOccurred()) }) }) Context("Verify disable-agent and egress-selector-mode flags", func() { It("Starts K3s with no issues", func() { disableAgentYAML := "disable-agent: true\negress-selector-mode: cluster" - err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), disableAgentYAML, "") + err := StartK3sCluster(append(tc.Servers, tc.Agents...), disableAgentYAML, "") Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS:" + *nodeOS) + By(tc.Status()) + tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) }) It("Checks node and pod status", func() { fmt.Printf("\nFetching node status\n") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "360s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Returns pod metrics", func() { @@ -320,7 +272,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { It("Runs an interactive command a pod", func() { cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a" - _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) }) @@ -331,57 +283,56 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { }) It("Kills the cluster", func() { - err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + err := KillK3sCluster(append(tc.Servers, tc.Agents...)) Expect(err).NotTo(HaveOccurred()) }) }) Context("Verify server picks up preloaded images on start", func() { It("Downloads and preloads images", func() { - _, err := e2e.RunCmdOnNode("docker pull 
ranchertest/mytestcontainer:latest", serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("docker pull ranchertest/mytestcontainer:latest") Expect(err).NotTo(HaveOccurred()) - _, err = e2e.RunCmdOnNode("docker save ranchertest/mytestcontainer:latest -o /tmp/mytestcontainer.tar", serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode("docker save ranchertest/mytestcontainer:latest -o /tmp/mytestcontainer.tar") Expect(err).NotTo(HaveOccurred()) - _, err = e2e.RunCmdOnNode("mkdir -p /var/lib/rancher/k3s/agent/images/", serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode("mkdir -p /var/lib/rancher/k3s/agent/images/") Expect(err).NotTo(HaveOccurred()) - _, err = e2e.RunCmdOnNode("mv /tmp/mytestcontainer.tar /var/lib/rancher/k3s/agent/images/", serverNodeNames[0]) + _, err = tc.Servers[0].RunCmdOnNode("mv /tmp/mytestcontainer.tar /var/lib/rancher/k3s/agent/images/") Expect(err).NotTo(HaveOccurred()) }) It("Starts K3s with no issues", func() { - err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), "", "") + err := StartK3sCluster(append(tc.Servers, tc.Agents...), "", "") Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS:" + *nodeOS) + By(tc.Status()) + tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) }) It("has loaded the test container image", func() { Eventually(func() (string, error) { cmd := "k3s crictl images | grep ranchertest/mytestcontainer" - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode(cmd) }, "120s", "5s").Should(ContainSubstring("ranchertest/mytestcontainer")) }) It("Kills the cluster", func() { - err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + err := KillK3sCluster(append(tc.Servers, tc.Agents...)) Expect(err).NotTo(HaveOccurred()) }) }) Context("Verify server fails to start with bootstrap token", func() { It("Fails to start with a meaningful error", func() { tokenYAML := "token: aaaaaa.bbbbbbbbbbbbbbbb" - err := StartK3sCluster(append(serverNodeNames, agentNodeNames...), tokenYAML, tokenYAML) + err := StartK3sCluster(append(tc.Servers, tc.Agents...), tokenYAML, tokenYAML) Expect(err).To(HaveOccurred()) Eventually(func(g Gomega) { - logs, err := e2e.GetJournalLogs(serverNodeNames[0]) + logs, err := tc.Servers[0].GetJournalLogs() g.Expect(err).NotTo(HaveOccurred()) g.Expect(logs).To(ContainSubstring("failed to normalize server token")) }, "120s", "5s").Should(Succeed()) }) It("Kills the cluster", func() { - err := KillK3sCluster(append(serverNodeNames, agentNodeNames...)) + err := KillK3sCluster(append(tc.Servers, tc.Agents...)) Expect(err).NotTo(HaveOccurred()) }) }) @@ -394,13 +345,13 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("config", e2e.GetConfig(append(serverNodeNames, agentNodeNames...))) - Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + AddReportEntry("config", e2e.GetConfig(append(tc.Servers, tc.Agents...))) + Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed()) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { 
Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go b/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go index dec419e176c4..80a98dbd496a 100644 --- a/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go +++ b/tests/e2e/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go @@ -13,6 +13,7 @@ import ( "testing" "text/template" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -33,311 +34,312 @@ func Test_E2EPoliciesAndFirewall(t *testing.T) { } var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string - nodes []e2e.Node + tc *e2e.TestConfig + nodes []e2e.Node ) var _ = ReportAfterEach(e2e.GenReport) var _ = Describe("Verify Services Traffic policies and firewall config", Ordered, func() { - It("Starts up with no issues", func() { - var err error - if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) - } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) - } - Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Node Status", func() { - Eventually(func(g Gomega) { + Context("Start cluster with minimal configuration", func() { + It("Starts up with no issues", func() { var err error - nodes, err = e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) + if *local { + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + } else { + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } - }, "300s", "5s").Should(Succeed()) - _, err := e2e.ParseNodes(kubeConfigFile, true) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Pod Status", func() { - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + var err error + nodes, err = e2e.ParseNodes(tc.KubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) } - } - }, "300s", "5s").Should(Succeed()) - _, err := e2e.ParsePods(kubeConfigFile, true) - Expect(err).NotTo(HaveOccurred()) + }, "300s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(tc.KubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "300s", "5s").Should(Succeed()) + e2e.DumpPods(tc.KubeConfigFile) + }) }) + Context("Deploy external traffic workloads to test external traffic policies", func() { + // Verifies that 
the service with external traffic policy=local is deployed + // Verifies that the external-ip is only set to the node IP where the server runs + // It also verifies that the service with external traffic policy=cluster has both node IPs as externalIP + It("Verify external traffic policy=local gets set up correctly", func() { + _, err := tc.DeployWorkload("loadbalancer.yaml") + Expect(err).NotTo(HaveOccurred(), "loadbalancer not deployed") + _, err = tc.DeployWorkload("loadbalancer-extTrafficPol.yaml") + Expect(err).NotTo(HaveOccurred(), "loadbalancer-extTrafficPol not deployed") + + // Check where the server pod is running + var serverNodeName string + Eventually(func() (string, error) { + pods, err := tests.ParsePods(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred(), "failed to parse pods") + for _, pod := range pods { + if strings.Contains(pod.Name, "test-loadbalancer-ext") { + serverNodeName = pod.Spec.NodeName + break + } + } + return serverNodeName, nil + }, "25s", "5s").ShouldNot(BeEmpty(), "server pod not found") - // Verifies that the service with external traffic policy=local is deployed - // Verifies that the external-ip is only set to the node IP where the server runs - // It also verifies that the service with external traffic policy=cluster has both node IPs as externalIP - It("Verify external traffic policy=local gets set up correctly", func() { - _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred(), "loadbalancer not deployed") - _, err = e2e.DeployWorkload("loadbalancer-extTrafficPol.yaml", kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred(), "loadbalancer-extTrafficPol not deployed") - - // Check where the server pod is running - var serverNodeName string - Eventually(func() (string, error) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred(), "failed to parse pods") - for _, pod := range pods { - if strings.Contains(pod.Name, "test-loadbalancer-ext") { - serverNodeName = pod.Node - break + var serverNodeIP string + for _, node := range nodes { + if node.Name == serverNodeName { + serverNodeIP = node.InternalIP } } - return serverNodeName, nil - }, "25s", "5s").ShouldNot(BeEmpty(), "server pod not found") - var serverNodeIP string - for _, node := range nodes { - if node.Name == serverNodeName { - serverNodeIP = node.InternalIP + // Verify there is only one external-ip and it is matching the node IP + lbSvc := "nginx-loadbalancer-svc" + lbSvcExt := "nginx-loadbalancer-svc-ext" + Eventually(func() ([]string, error) { + return e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvc) + }, "25s", "5s").Should(HaveLen(2), "external IP count not equal to 2") + + Eventually(func(g Gomega) { + externalIPs, _ := e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvcExt) + g.Expect(externalIPs).To(HaveLen(1), "more than 1 exernalIP found") + g.Expect(externalIPs[0]).To(Equal(serverNodeIP), "external IP does not match servernodeIP") + }, "25s", "5s").Should(Succeed()) + }) + + // Verifies that the service is reachable from the outside and the source IP is nos MASQ + // It also verifies that the service with external traffic policy=cluster can be accessed and the source IP is MASQ + It("Verify connectivity in external traffic policy=local", func() { + lbSvc := "nginx-loadbalancer-svc" + lbSvcExternalIPs, _ := e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvc) + lbSvcExt := "nginx-loadbalancer-svc-ext" + lbSvcExtExternalIPs, _ := e2e.FetchExternalIPs(tc.KubeConfigFile, lbSvcExt) + + // Verify connectivity to 
the external IP of the lbsvc service and the IP should be the flannel interface IP because of MASQ + for _, externalIP := range lbSvcExternalIPs { + Eventually(func() (string, error) { + cmd := "curl -s " + externalIP + ":81/ip" + return e2e.RunCommand(cmd) + }, "25s", "5s").Should(ContainSubstring("10.42")) } - } - - // Verify there is only one external-ip and it is matching the node IP - lbSvc := "nginx-loadbalancer-svc" - lbSvcExt := "nginx-loadbalancer-svc-ext" - Eventually(func() ([]string, error) { - return e2e.FetchExternalIPs(kubeConfigFile, lbSvc) - }, "25s", "5s").Should(HaveLen(2), "external IP count not equal to 2") - - Eventually(func(g Gomega) { - externalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvcExt) - g.Expect(externalIPs).To(HaveLen(1), "more than 1 exernalIP found") - g.Expect(externalIPs[0]).To(Equal(serverNodeIP), "external IP does not match servernodeIP") - }, "25s", "5s").Should(Succeed()) - }) - - // Verifies that the service is reachable from the outside and the source IP is nos MASQ - // It also verifies that the service with external traffic policy=cluster can be accessed and the source IP is MASQ - It("Verify connectivity in external traffic policy=local", func() { - lbSvc := "nginx-loadbalancer-svc" - lbSvcExternalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvc) - lbSvcExt := "nginx-loadbalancer-svc-ext" - lbSvcExtExternalIPs, _ := e2e.FetchExternalIPs(kubeConfigFile, lbSvcExt) - // Verify connectivity to the external IP of the lbsvc service and the IP should be the flannel interface IP because of MASQ - for _, externalIP := range lbSvcExternalIPs { + // Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP Eventually(func() (string, error) { - cmd := "curl -s " + externalIP + ":81/ip" + cmd := "curl -s " + lbSvcExtExternalIPs[0] + ":82/ip" return e2e.RunCommand(cmd) - }, "25s", "5s").Should(ContainSubstring("10.42")) - } - - // Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP - Eventually(func() (string, error) { - cmd := "curl -s " + lbSvcExtExternalIPs[0] + ":82/ip" - return e2e.RunCommand(cmd) - }, "25s", "5s").ShouldNot(ContainSubstring("10.42")) - - // Verify connectivity to the other nodeIP does not work because of external traffic policy=local - for _, externalIP := range lbSvcExternalIPs { - if externalIP == lbSvcExtExternalIPs[0] { - // This IP we already test and it shuold work - continue - } - Eventually(func() error { - cmd := "curl -s --max-time 5 " + externalIP + ":82/ip" - _, err := e2e.RunCommand(cmd) - return err - }, "40s", "5s").Should(MatchError(ContainSubstring("exit status"))) - } - }) + }, "25s", "5s").ShouldNot(ContainSubstring("10.42")) - // Verifies that the internal traffic policy=local is deployed - It("Verify internal traffic policy=local gets set up correctly", func() { - _, err := e2e.DeployWorkload("loadbalancer-intTrafficPol.yaml", kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred(), "loadbalancer-intTrafficPol not deployed") - _, err = e2e.DeployWorkload("pod_client.yaml", kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred(), "pod client not deployed") - - // Check that service exists - Eventually(func() (string, error) { - clusterIP, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-loadbalancer-svc-int", false) - return clusterIP, nil - }, "25s", "5s").Should(ContainSubstring("10.43")) - - // Check that client pods are running - Eventually(func() string { - pods, err := 
e2e.ParsePods(kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "client-deployment") { - return pod.Status + // Verify connectivity to the other nodeIP does not work because of external traffic policy=local + for _, externalIP := range lbSvcExternalIPs { + if externalIP == lbSvcExtExternalIPs[0] { + // This IP we already test and it shuold work + continue } + Eventually(func() error { + cmd := "curl -s --max-time 5 " + externalIP + ":82/ip" + _, err := e2e.RunCommand(cmd) + return err + }, "40s", "5s").Should(MatchError(ContainSubstring("exit status"))) } - return "" - }, "50s", "5s").Should(Equal("Running")) - }) + }) + + // Verifies that the internal traffic policy=local is deployed + It("Verify internal traffic policy=local gets set up correctly", func() { + _, err := tc.DeployWorkload("loadbalancer-intTrafficPol.yaml") + Expect(err).NotTo(HaveOccurred(), "loadbalancer-intTrafficPol not deployed") + _, err = tc.DeployWorkload("pod_client.yaml") + Expect(err).NotTo(HaveOccurred(), "pod client not deployed") - // Verifies that only the client pod running in the same node as the server pod can access the service - // It also verifies that the service with internal traffic policy=cluster can be accessed by both client pods - It("Verify connectivity in internal traffic policy=local", func() { - var clientPod1, clientPod1Node, clientPod1IP, clientPod2, clientPod2Node, clientPod2IP, serverNodeName string - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - Expect(err).NotTo(HaveOccurred(), "failed to parse pods") - for _, pod := range pods { - if strings.Contains(pod.Name, "test-loadbalancer-int") { - serverNodeName = pod.Node + // Check that service exists + Eventually(func() (string, error) { + clusterIP, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-loadbalancer-svc-int", false) + return clusterIP, nil + }, "25s", "5s").Should(ContainSubstring("10.43")) + + // Check that client pods are running + Eventually(func() string { + pods, err := tests.ParsePods(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "client-deployment") { + return string(pod.Status.Phase) + } } - if strings.Contains(pod.Name, "client-deployment") { - if clientPod1 == "" { - clientPod1 = pod.Name - clientPod1Node = pod.Node - clientPod1IP = pod.IP - } else { - clientPod2 = pod.Name - clientPod2Node = pod.Node - clientPod2IP = pod.IP + return "" + }, "50s", "5s").Should(Equal("Running")) + }) + + // Verifies that only the client pod running in the same node as the server pod can access the service + // It also verifies that the service with internal traffic policy=cluster can be accessed by both client pods + It("Verify connectivity in internal traffic policy=local", func() { + var clientPod1, clientPod1Node, clientPod1IP, clientPod2, clientPod2Node, clientPod2IP, serverNodeName string + Eventually(func(g Gomega) { + pods, err := tests.ParsePods(tc.KubeConfigFile) + Expect(err).NotTo(HaveOccurred(), "failed to parse pods") + for _, pod := range pods { + if strings.Contains(pod.Name, "test-loadbalancer-int") { + serverNodeName = pod.Spec.NodeName + } + if strings.Contains(pod.Name, "client-deployment") { + if clientPod1 == "" { + clientPod1 = pod.Name + clientPod1Node = pod.Spec.NodeName + clientPod1IP = pod.Status.PodIP + } else { + clientPod2 = pod.Name + clientPod2Node = pod.Spec.NodeName + clientPod2IP = pod.Status.PodIP + } } } + // As we need those 
variables for the connectivity test, let's check they are not emtpy + g.Expect(serverNodeName).ShouldNot(BeEmpty(), "server pod for internalTrafficPolicy=local not found") + g.Expect(clientPod1).ShouldNot(BeEmpty(), "client pod1 not found") + g.Expect(clientPod2).ShouldNot(BeEmpty(), "client pod2 not found") + g.Expect(clientPod1Node).ShouldNot(BeEmpty(), "client pod1 node not found") + g.Expect(clientPod2Node).ShouldNot(BeEmpty(), "client pod2 node not found") + g.Expect(clientPod1IP).ShouldNot(BeEmpty(), "client pod1 IP not found") + g.Expect(clientPod2IP).ShouldNot(BeEmpty(), "client pod2 IP not found") + }, "25s", "5s").Should(Succeed(), "All pod and names and IPs should be non-empty") + + // Check that clientPod1Node and clientPod2Node are not equal + Expect(clientPod1Node).ShouldNot(Equal(clientPod2Node)) + + var workingCmd, nonWorkingCmd string + if serverNodeName == clientPod1Node { + workingCmd = "kubectl exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" + nonWorkingCmd = "kubectl exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" } - // As we need those variables for the connectivity test, let's check they are not emtpy - g.Expect(serverNodeName).ShouldNot(BeEmpty(), "server pod for internalTrafficPolicy=local not found") - g.Expect(clientPod1).ShouldNot(BeEmpty(), "client pod1 not found") - g.Expect(clientPod2).ShouldNot(BeEmpty(), "client pod2 not found") - g.Expect(clientPod1Node).ShouldNot(BeEmpty(), "client pod1 node not found") - g.Expect(clientPod2Node).ShouldNot(BeEmpty(), "client pod2 node not found") - g.Expect(clientPod1IP).ShouldNot(BeEmpty(), "client pod1 IP not found") - g.Expect(clientPod2IP).ShouldNot(BeEmpty(), "client pod2 IP not found") - }, "25s", "5s").Should(Succeed(), "All pod and names and IPs should be non-empty") - - // Check that clientPod1Node and clientPod2Node are not equal - Expect(clientPod1Node).ShouldNot(Equal(clientPod2Node)) - - var workingCmd, nonWorkingCmd string - if serverNodeName == clientPod1Node { - workingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" - nonWorkingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" - } - if serverNodeName == clientPod2Node { - workingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" - nonWorkingCmd = "kubectl --kubeconfig=" + kubeConfigFile + " exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" - } - - Eventually(func() (string, error) { - out, err := e2e.RunCommand(workingCmd) - return out, err - }, "25s", "5s").Should(SatisfyAny( - ContainSubstring(clientPod1IP), - ContainSubstring(clientPod2IP), - )) - - // Check the non working command fails because of internal traffic policy=local - Eventually(func() bool { - _, err := e2e.RunCommand(nonWorkingCmd) - if err != nil && strings.Contains(err.Error(), "exit status") { - // Treat exit status as a successful condition - return true + if serverNodeName == clientPod2Node { + workingCmd = "kubectl exec " + clientPod2 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" + nonWorkingCmd = "kubectl exec " + clientPod1 + " -- curl -s --max-time 5 nginx-loadbalancer-svc-int:83/ip" } - return false - }, "40s", "5s").Should(BeTrue()) - // curling a service with internal traffic policy=cluster. 
It should work on both pods - for _, pod := range []string{clientPod1, clientPod2} { - cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec " + pod + " -- curl -s --max-time 5 nginx-loadbalancer-svc:81/ip" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) - }, "20s", "5s").Should(SatisfyAny( + out, err := e2e.RunCommand(workingCmd) + return out, err + }, "25s", "5s").Should(SatisfyAny( ContainSubstring(clientPod1IP), ContainSubstring(clientPod2IP), )) - } - }) - // Set up the service manifest with loadBalancerSourceRanges - It("Applies service manifest with loadBalancerSourceRanges", func() { - // Define the service manifest with a placeholder for the IP - serviceManifest := ` + // Check the non working command fails because of internal traffic policy=local + Eventually(func() bool { + _, err := e2e.RunCommand(nonWorkingCmd) + if err != nil && strings.Contains(err.Error(), "exit status") { + // Treat exit status as a successful condition + return true + } + return false + }, "40s", "5s").Should(BeTrue()) + + // curling a service with internal traffic policy=cluster. It should work on both pods + for _, pod := range []string{clientPod1, clientPod2} { + cmd := "kubectl exec " + pod + " -- curl -s --max-time 5 nginx-loadbalancer-svc:81/ip" + Eventually(func() (string, error) { + return e2e.RunCommand(cmd) + }, "20s", "5s").Should(SatisfyAny( + ContainSubstring(clientPod1IP), + ContainSubstring(clientPod2IP), + )) + } + }) + + // Set up the service manifest with loadBalancerSourceRanges + It("Applies service manifest with loadBalancerSourceRanges", func() { + // Define the service manifest with a placeholder for the IP + serviceManifest := ` apiVersion: v1 kind: Service metadata: - name: nginx-loadbalancer-svc-ext-firewall +name: nginx-loadbalancer-svc-ext-firewall spec: - type: LoadBalancer - loadBalancerSourceRanges: - - {{.NodeIP}}/32 - ports: - - port: 82 - targetPort: 80 - protocol: TCP - name: http - selector: - k8s-app: nginx-app-loadbalancer-ext +type: LoadBalancer +loadBalancerSourceRanges: +- {{.NodeIP}}/32 +ports: +- port: 82 + targetPort: 80 + protocol: TCP + name: http +selector: + k8s-app: nginx-app-loadbalancer-ext ` - // Remove the service nginx-loadbalancer-svc-ext - _, err := e2e.RunCommand("kubectl --kubeconfig=" + kubeConfigFile + " delete svc nginx-loadbalancer-svc-ext") - Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext") - - // Parse and execute the template with the node IP - tmpl, err := template.New("service").Parse(serviceManifest) - Expect(err).NotTo(HaveOccurred()) - - var filledManifest strings.Builder - err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodes[0].InternalIP}) - Expect(err).NotTo(HaveOccurred()) - - // Write the filled manifest to a temporary file - tmpFile, err := os.CreateTemp("", "service-*.yaml") - Expect(err).NotTo(HaveOccurred()) - defer os.Remove(tmpFile.Name()) - - _, err = tmpFile.WriteString(filledManifest.String()) - Expect(err).NotTo(HaveOccurred()) - tmpFile.Close() - - // Apply the manifest using kubectl - applyCmd := fmt.Sprintf("kubectl --kubeconfig=%s apply -f %s", kubeConfigFile, tmpFile.Name()) - out, err := e2e.RunCommand(applyCmd) - Expect(err).NotTo(HaveOccurred(), out) - - Eventually(func() (string, error) { - clusterIP, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-loadbalancer-svc-ext-firewall", false) - return clusterIP, nil - }, "25s", "5s").Should(ContainSubstring("10.43")) - }) + // Remove the service nginx-loadbalancer-svc-ext + _, err := 
e2e.RunCommand("kubectl delete svc nginx-loadbalancer-svc-ext") + Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext") + + // Parse and execute the template with the node IP + tmpl, err := template.New("service").Parse(serviceManifest) + Expect(err).NotTo(HaveOccurred()) + + var filledManifest strings.Builder + err = tmpl.Execute(&filledManifest, struct{ NodeIP string }{NodeIP: nodes[0].InternalIP}) + Expect(err).NotTo(HaveOccurred()) + + // Write the filled manifest to a temporary file + tmpFile, err := os.CreateTemp("", "service-*.yaml") + Expect(err).NotTo(HaveOccurred()) + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.WriteString(filledManifest.String()) + Expect(err).NotTo(HaveOccurred()) + tmpFile.Close() + + // Apply the manifest using kubectl + applyCmd := fmt.Sprintf("kubectl apply -f %s", tmpFile.Name()) + out, err := e2e.RunCommand(applyCmd) + Expect(err).NotTo(HaveOccurred(), out) - // Verify that only the allowed node can curl. That node should be able to curl both externalIPs (i.e. node.InternalIP) - It("Verify firewall is working", func() { - for _, node := range nodes { - // Verify connectivity from nodes[0] works because we passed its IP to the loadBalancerSourceRanges Eventually(func() (string, error) { - cmd := "curl -s --max-time 5 " + node.InternalIP + ":82" - return e2e.RunCmdOnNode(cmd, nodes[0].Name) - }, "40s", "5s").Should(ContainSubstring("Welcome to nginx")) - - // Verify connectivity from nodes[1] fails because we did not pass its IP to the loadBalancerSourceRanges - Eventually(func(g Gomega) error { - cmd := "curl -s --max-time 5 " + node.InternalIP + ":82" - _, err := e2e.RunCmdOnNode(cmd, nodes[1].Name) - return err - }, "40s", "5s").Should(MatchError(ContainSubstring("exit status"))) - } + clusterIP, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-loadbalancer-svc-ext-firewall", false) + return clusterIP, nil + }, "25s", "5s").Should(ContainSubstring("10.43")) + }) + + // Verify that only the allowed node can curl. That node should be able to curl both externalIPs (i.e. 
node.InternalIP) + It("Verify firewall is working", func() { + for _, node := range nodes { + var sNode, aNode e2e.VagrantNode + for _, n := range tc.Servers { + if n.String() == nodes[0].Name { + sNode = n + } + } + for _, n := range tc.Agents { + if n.String() == nodes[1].Name { + aNode = n + } + } + + // Verify connectivity from nodes[0] works because we passed its IP to the loadBalancerSourceRanges + Eventually(func() (string, error) { + cmd := "curl -s --max-time 5 " + node.InternalIP + ":82" + return sNode.RunCmdOnNode(cmd) + }, "40s", "5s").Should(ContainSubstring("Welcome to nginx")) + + // Verify connectivity from nodes[1] fails because we did not pass its IP to the loadBalancerSourceRanges + Eventually(func(g Gomega) error { + cmd := "curl -s --max-time 5 " + node.InternalIP + ":82" + _, err := aNode.RunCmdOnNode(cmd) + return err + }, "40s", "5s").Should(MatchError(ContainSubstring("exit status"))) + } + }) }) }) @@ -348,12 +350,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/tailscale/tailscale_test.go b/tests/e2e/tailscale/tailscale_test.go index 449840e4f990..67adbf9834d7 100644 --- a/tests/e2e/tailscale/tailscale_test.go +++ b/tests/e2e/tailscale/tailscale_test.go @@ -25,11 +25,7 @@ func Test_E2ETailscale(t *testing.T) { RunSpecs(t, "Tailscale Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -38,61 +34,59 @@ var _ = Describe("Verify Tailscale Configuration", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) Expect(err).NotTo(HaveOccurred()) }) // Server node needs to be ready before we continue It("Checks Node Status", func() { Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "300s", "5s").Should(Succeed()) - _, err := e2e.ParseNodes(kubeConfigFile, true) + _, err := e2e.ParseNodes(tc.KubeConfigFile, true) Expect(err).NotTo(HaveOccurred()) }) It("Change agent's config", func() { - nodeIPs, _ := e2e.GetNodeIPs(kubeConfigFile) + 
nodeIPs, _ := e2e.GetNodeIPs(tc.KubeConfigFile) cmd := fmt.Sprintf("sudo sed -i 's/TAILSCALEIP/%s/g' /etc/rancher/k3s/config.yaml", nodeIPs[0].IPv4) - for _, agent := range agentNodeNames { - _, err := e2e.RunCmdOnNode(cmd, agent) + for _, agent := range tc.Agents { + _, err := agent.RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) } }) It("Restart agents", func() { - err := e2e.RestartCluster(agentNodeNames) + err := e2e.RestartCluster(tc.Agents) Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) }) It("Checks Node Status", func() { Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) g.Expect(len(nodes)).To(Equal(*agentCount + *serverCount)) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "300s", "5s").Should(Succeed()) - _, err := e2e.ParseNodes(kubeConfigFile, true) + _, err := e2e.ParseNodes(tc.KubeConfigFile, true) Expect(err).NotTo(HaveOccurred()) }) It("Verifies that server and agent have a tailscale IP as nodeIP", func() { - nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) + nodeIPs, err := e2e.GetNodeIPs(tc.KubeConfigFile) Expect(err).NotTo(HaveOccurred()) for _, node := range nodeIPs { Expect(node.IPv4).Should(ContainSubstring("100.")) @@ -102,8 +96,8 @@ var _ = Describe("Verify Tailscale Configuration", Ordered, func() { It("Verify routing is correct and uses tailscale0 interface for internode traffic", func() { // table 52 is the one configured by tailscale cmd := "ip route show table 52" - for _, node := range append(serverNodeNames, agentNodeNames...) { - output, err := e2e.RunCmdOnNode(cmd, node) + for _, node := range append(tc.Servers, tc.Agents...) { + output, err := node.RunCmdOnNode(cmd) fmt.Println(err) Expect(err).NotTo(HaveOccurred()) Expect(output).Should(ContainSubstring("10.42.")) @@ -119,12 +113,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index c50fb0c398d9..4b01e493bb9f 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -18,6 +18,39 @@ import ( "golang.org/x/sync/errgroup" ) +// defining the VagrantNode type allows methods like RunCmdOnNode to be defined on it. +// This makes test code more consistent, as similar functions can exist in Docker and E2E tests.
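To make that comment concrete, here is a minimal usage sketch (illustrative only, not taken from the diff; the wrapper function name is invented, while e2e.TestConfig, RunCmdOnNode, and RestartCluster are the helpers introduced in this file):

package example

import "github.com/k3s-io/k3s/tests/e2e"

// restartAgentsExample shows the typed-node pattern: any entry of tc.Servers or
// tc.Agents can run commands directly, and node slices are passed around as-is.
func restartAgentsExample(tc *e2e.TestConfig) error {
	// RunCmdOnNode is a method on VagrantNode and wraps "vagrant ssh ... sudo <cmd>".
	if _, err := tc.Servers[0].RunCmdOnNode("kubectl get nodes"); err != nil {
		return err
	}
	// RestartCluster accepts []VagrantNode, so agents (or servers and agents) go in unchanged.
	return e2e.RestartCluster(tc.Agents)
}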
+type VagrantNode string + +func (v VagrantNode) String() string { + return string(v) +} + +func VagrantSlice(v []VagrantNode) []string { + nodes := make([]string, 0, len(v)) + for _, node := range v { + nodes = append(nodes, node.String()) + } + return nodes +} + +type TestConfig struct { + Hardened bool + KubeConfigFile string + Servers []VagrantNode + Agents []VagrantNode +} + +func (tc *TestConfig) Status() string { + sN := strings.Join(VagrantSlice(tc.Servers), " ") + aN := strings.Join(VagrantSlice(tc.Agents), " ") + hardened := "" + if tc.Hardened { + hardened = "Hardened: true\n" + } + return fmt.Sprintf("%sKubeconfig: %s\nServer Nodes: %s\nAgent Nodes: %s\n", hardened, tc.KubeConfigFile, sN, aN) +} + type Node struct { Name string Status string @@ -30,18 +63,8 @@ func (n Node) String() string { return fmt.Sprintf("Node (name: %s, status: %s, roles: %s)", n.Name, n.Status, n.Roles) } -type Pod struct { - NameSpace string - Name string - Ready string - Status string - Restarts string - IP string - Node string -} - type NodeError struct { - Node string + Node VagrantNode Cmd string Err error } @@ -65,7 +88,7 @@ func (ne *NodeError) Unwrap() error { return ne.Err } -func newNodeError(cmd, node string, err error) *NodeError { +func newNodeError(cmd string, node VagrantNode, err error) *NodeError { return &NodeError{ Cmd: cmd, Node: node, @@ -73,28 +96,18 @@ func newNodeError(cmd, node string, err error) *NodeError { } } -func CountOfStringInSlice(str string, pods []Pod) int { - count := 0 - for _, pod := range pods { - if strings.Contains(pod.Name, str) { - count++ - } - } - return count -} - // genNodeEnvs generates the node and testing environment variables for vagrant up -func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string, string) { - serverNodeNames := make([]string, serverCount) +func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]VagrantNode, []VagrantNode, string) { + serverNodes := make([]VagrantNode, serverCount) for i := 0; i < serverCount; i++ { - serverNodeNames[i] = "server-" + strconv.Itoa(i) + serverNodes[i] = VagrantNode("server-" + strconv.Itoa(i)) } - agentNodeNames := make([]string, agentCount) + agentNodes := make([]VagrantNode, agentCount) for i := 0; i < agentCount; i++ { - agentNodeNames[i] = "agent-" + strconv.Itoa(i) + agentNodes[i] = VagrantNode("agent-" + strconv.Itoa(i)) } - nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ") + nodeRoles := strings.Join(VagrantSlice(serverNodes), " ") + " " + strings.Join(VagrantSlice(agentNodes), " ") nodeRoles = strings.TrimSpace(nodeRoles) nodeBoxes := strings.Repeat(nodeOS+" ", serverCount+agentCount) @@ -102,12 +115,12 @@ func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string nodeEnvs := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s"`, nodeRoles, nodeBoxes) - return serverNodeNames, agentNodeNames, nodeEnvs + return serverNodes, agentNodes, nodeEnvs } -func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) { +func CreateCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, error) { - serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount) + serverNodes, agentNodes, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount) var testOptions string for _, env := range os.Environ() { @@ -116,16 +129,16 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri } } // Bring up the first server node - cmd :=
fmt.Sprintf(`%s %s vagrant up %s &> vagrant.log`, nodeEnvs, testOptions, serverNodeNames[0]) + cmd := fmt.Sprintf(`%s %s vagrant up %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0]) fmt.Println(cmd) if _, err := RunCommand(cmd); err != nil { - return nil, nil, newNodeError(cmd, serverNodeNames[0], err) + return nil, newNodeError(cmd, serverNodes[0], err) } // Bring up the rest of the nodes in parallel errg, _ := errgroup.WithContext(context.Background()) - for _, node := range append(serverNodeNames[1:], agentNodeNames...) { - cmd := fmt.Sprintf(`%s %s vagrant up %s &>> vagrant.log`, nodeEnvs, testOptions, node) + for _, node := range append(serverNodes[1:], agentNodes...) { + cmd := fmt.Sprintf(`%s %s vagrant up %s &>> vagrant.log`, nodeEnvs, testOptions, node.String()) fmt.Println(cmd) errg.Go(func() error { if _, err := RunCommand(cmd); err != nil { @@ -134,26 +147,47 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri return nil }) // We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster - if strings.Contains(node, "agent") { + if strings.Contains(node.String(), "agent") { time.Sleep(5 * time.Second) } else { time.Sleep(30 * time.Second) } } if err := errg.Wait(); err != nil { - return nil, nil, err + return nil, err } - return serverNodeNames, agentNodeNames, nil + // For startup test, we don't start the cluster, so check first before + // generating the kubeconfig file + var kubeConfigFile string + res, err := serverNodes[0].RunCmdOnNode("systemctl is-active k3s") + if err != nil { + return nil, err + } + if !strings.Contains(res, "inactive") && strings.Contains(res, "active") { + kubeConfigFile, err = GenKubeConfigFile(serverNodes[0].String()) + if err != nil { + return nil, err + } + } + + tc := &TestConfig{ + KubeConfigFile: kubeConfigFile, + Servers: serverNodes, + Agents: agentNodes, + } + + return tc, nil } -func scpK3sBinary(nodeNames []string) error { +func scpK3sBinary(nodeNames []VagrantNode) error { for _, node := range nodeNames { - cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node) + cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node.String()) if _, err := RunCommand(cmd); err != nil { return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) } - if _, err := RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil { + cmd = "vagrant ssh " + node.String() + " -c \"sudo mv /tmp/k3s /usr/local/bin/\"" + if _, err := RunCommand(cmd); err != nil { return err } } @@ -162,10 +196,9 @@ func scpK3sBinary(nodeNames []string) error { // CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for // this function to work. The binary is deployed as an airgapped install of k3s on the VMs. -// This is intended only for local testing purposes when writing a new E2E test. -func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) { +func CreateLocalCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, error) { - serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount) + serverNodes, agentNodes, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount) var testOptions string var cmd string @@ -179,15 +212,15 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [ // Provision the first server node. 
In GitHub Actions, this also imports the VM image into libvirt, which // takes time and can cause the next vagrant up to fail if it is not given enough time to complete. - cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, serverNodeNames[0]) + cmd = fmt.Sprintf(`%s %s vagrant up --no-tty --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0]) fmt.Println(cmd) if _, err := RunCommand(cmd); err != nil { - return nil, nil, newNodeError(cmd, serverNodeNames[0], err) + return nil, newNodeError(cmd, serverNodes[0], err) } // Bring up the rest of the nodes in parallel errg, _ := errgroup.WithContext(context.Background()) - for _, node := range append(serverNodeNames[1:], agentNodeNames...) { + for _, node := range append(serverNodes[1:], agentNodes...) { cmd := fmt.Sprintf(`%s %s vagrant up --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node) errg.Go(func() error { if _, err := RunCommand(cmd); err != nil { @@ -199,15 +232,15 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [ time.Sleep(10 * time.Second) } if err := errg.Wait(); err != nil { - return nil, nil, err + return nil, err } - if err := scpK3sBinary(append(serverNodeNames, agentNodeNames...)); err != nil { - return nil, nil, err + if err := scpK3sBinary(append(serverNodes, agentNodes...)); err != nil { + return nil, err } // Install K3s on all nodes in parallel errg, _ = errgroup.WithContext(context.Background()) - for _, node := range append(serverNodeNames, agentNodeNames...) { + for _, node := range append(serverNodes, agentNodes...) { cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node) errg.Go(func() error { if _, err := RunCommand(cmd); err != nil { @@ -219,15 +252,34 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [ time.Sleep(20 * time.Second) } if err := errg.Wait(); err != nil { - return nil, nil, err + return nil, err } - return serverNodeNames, agentNodeNames, nil + // For the startup test, we don't start the cluster, so check first before generating the kubeconfig file. + // Systemctl returns an exit code of 3 when the service is inactive, so we don't check for errors + // on the command itself.
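One subtlety worth spelling out before the check that follows: systemctl is-active prints a state string (active, inactive, activating, ...), and "inactive" contains "active" as a substring, so a single Contains test would misreport a stopped service as running; hence the double check. A small self-contained sketch of the same predicate (the helper name and standalone file are invented for illustration):

package main

import (
	"fmt"
	"strings"
)

// k3sLooksActive mirrors the check used in CreateCluster/CreateLocalCluster:
// reject the "inactive" state explicitly before accepting anything containing "active".
func k3sLooksActive(state string) bool {
	return !strings.Contains(state, "inactive") && strings.Contains(state, "active")
}

func main() {
	for _, s := range []string{"active", "inactive", "activating", "failed"} {
		fmt.Printf("%-10s -> %v\n", s, k3sLooksActive(s))
	}
}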
+ var kubeConfigFile string + var err error + res, _ := serverNodes[0].RunCmdOnNode("systemctl is-active k3s") + if !strings.Contains(res, "inactive") && strings.Contains(res, "active") { + kubeConfigFile, err = GenKubeConfigFile(serverNodes[0].String()) + if err != nil { + return nil, err + } + } + + tc := &TestConfig{ + KubeConfigFile: kubeConfigFile, + Servers: serverNodes, + Agents: agentNodes, + } + + return tc, nil } -func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) { +func (tc TestConfig) DeployWorkload(workload string) (string, error) { resourceDir := "../amd64_resource_files" - if hardened { + if tc.Hardened { resourceDir = "../cis_amd64_resource_files" } files, err := os.ReadDir(resourceDir) @@ -239,7 +291,7 @@ func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) for _, f := range files { filename := filepath.Join(resourceDir, f.Name()) if strings.TrimSpace(f.Name()) == workload { - cmd := "kubectl apply -f " + filename + " --kubeconfig=" + kubeconfig + cmd := "kubectl apply -f " + filename + " --kubeconfig=" + tc.KubeConfigFile return RunCommand(cmd) } } @@ -298,12 +350,15 @@ func FetchIngressIP(kubeconfig string) ([]string, error) { } ingressIP := strings.Trim(res, " ") ingressIPs := strings.Split(ingressIP, " ") + if len(ingressIPs) == 0 { + return nil, errors.New("no ingress IP found") + } return ingressIPs, nil } -func FetchNodeExternalIP(nodename string) (string, error) { - cmd := "vagrant ssh " + nodename + " -c \"ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1\"" - ipaddr, err := RunCommand(cmd) +func (v VagrantNode) FetchNodeExternalIP() (string, error) { + cmd := "ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1" + ipaddr, err := v.RunCmdOnNode(cmd) if err != nil { return "", err } @@ -313,9 +368,10 @@ func FetchNodeExternalIP(nodename string) (string, error) { return nodeip, nil } -func GenKubeConfigFile(serverName string) (string, error) { - kubeConfigFile := fmt.Sprintf("kubeconfig-%s", serverName) - cmd := fmt.Sprintf("vagrant scp %s:/etc/rancher/k3s/k3s.yaml ./%s", serverName, kubeConfigFile) +// GenKubeConfigFile extracts the kubeconfig from the given node and modifies it for use outside the VM. 
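As context for the function that follows, a host-side caller would typically use it like this (illustrative sketch only; the kubectl command is just an example, while the package path, GenKubeConfigFile, and RunCommand are the helpers in this file):

package main

import (
	"fmt"

	"github.com/k3s-io/k3s/tests/e2e"
)

func main() {
	// Copy the kubeconfig off the server-0 VM; GenKubeConfigFile rewrites it for
	// use from outside the VM (it looks up the node's external IP for that).
	kubeconfig, err := e2e.GenKubeConfigFile("server-0")
	if err != nil {
		panic(err)
	}
	// Any host-side kubectl invocation can then target the cluster inside the VMs.
	out, err := e2e.RunCommand("kubectl get nodes --kubeconfig=" + kubeconfig)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}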
+func GenKubeConfigFile(nodeName string) (string, error) { + kubeConfigFile := fmt.Sprintf("kubeconfig-%s", nodeName) + cmd := fmt.Sprintf("vagrant scp %s:/etc/rancher/k3s/k3s.yaml ./%s", nodeName, kubeConfigFile) _, err := RunCommand(cmd) if err != nil { return "", err @@ -328,7 +384,8 @@ func GenKubeConfigFile(serverName string) (string, error) { re := regexp.MustCompile(`(?m)==> vagrant:.*\n`) modifiedKubeConfig := re.ReplaceAllString(string(kubeConfig), "") - nodeIP, err := FetchNodeExternalIP(serverName) + vNode := VagrantNode(nodeName) + nodeIP, err := vNode.FetchNodeExternalIP() if err != nil { return "", err } @@ -359,16 +416,16 @@ func GenReport(specReport ginkgo.SpecReport) { fmt.Printf("%s", status) } -func GetJournalLogs(node string) (string, error) { +func (v VagrantNode) GetJournalLogs() (string, error) { cmd := "journalctl -u k3s* --no-pager" - return RunCmdOnNode(cmd, node) + return v.RunCmdOnNode(cmd) } -func TailJournalLogs(lines int, nodes []string) string { +func TailJournalLogs(lines int, nodes []VagrantNode) string { logs := &strings.Builder{} for _, node := range nodes { cmd := fmt.Sprintf("journalctl -u k3s* --no-pager --lines=%d", lines) - if l, err := RunCmdOnNode(cmd, node); err != nil { + if l, err := node.RunCmdOnNode(cmd); err != nil { fmt.Fprintf(logs, "** failed to read journald log for node %s ***\n%v\n", node, err) } else { fmt.Fprintf(logs, "** journald log for node %s ***\n%s\n", node, l) @@ -379,14 +436,14 @@ func TailJournalLogs(lines int, nodes []string) string { // SaveJournalLogs saves the journal logs of each node to a -jlog.txt file. // When used in GHA CI, the logs are uploaded as an artifact on failure. -func SaveJournalLogs(nodeNames []string) error { - for _, node := range nodeNames { - lf, err := os.Create(node + "-jlog.txt") +func SaveJournalLogs(nodes []VagrantNode) error { + for _, node := range nodes { + lf, err := os.Create(node.String() + "-jlog.txt") if err != nil { return err } defer lf.Close() - logs, err := GetJournalLogs(node) + logs, err := node.GetJournalLogs() if err != nil { return err } @@ -397,11 +454,11 @@ func SaveJournalLogs(nodeNames []string) error { return nil } -func GetConfig(nodes []string) string { +func GetConfig(nodes []VagrantNode) string { config := &strings.Builder{} for _, node := range nodes { cmd := "tar -Pc /etc/rancher/k3s/ | tar -vxPO" - if c, err := RunCmdOnNode(cmd, node); err != nil { + if c, err := node.RunCmdOnNode(cmd); err != nil { fmt.Fprintf(config, "** failed to get config for node %s ***\n%v\n", node, err) } else { fmt.Fprintf(config, "** config for node %s ***\n%s\n", node, c) @@ -416,7 +473,7 @@ func GetVagrantLog(cErr error) string { var nodeErr *NodeError nodeJournal := "" if errors.As(cErr, &nodeErr) { - nodeJournal, _ = GetJournalLogs(nodeErr.Node) + nodeJournal, _ = nodeErr.Node.GetJournalLogs() nodeJournal = "\nNode Journal Logs:\n" + nodeJournal } @@ -464,51 +521,17 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) { return nodes, nil } -func formatPods(input string) ([]Pod, error) { - pods := make([]Pod, 0, 10) - input = strings.TrimSpace(input) - split := strings.Split(input, "\n") - for _, rec := range split { - fields := strings.Fields(string(rec)) - if len(fields) < 8 { - return nil, fmt.Errorf("invalid pod record: %s", rec) - } - pod := Pod{ - NameSpace: fields[0], - Name: fields[1], - Ready: fields[2], - Status: fields[3], - Restarts: fields[4], - IP: fields[6], - Node: fields[7], - } - pods = append(pods, pod) - } - return pods, nil -} - -func 
ParsePods(kubeConfig string, print bool) ([]Pod, error) { - podList := "" - +func DumpPods(kubeConfig string) { cmd := "kubectl get pods -o wide --no-headers -A" res, _ := RunCommand(cmd) - podList = strings.TrimSpace(res) - - pods, err := formatPods(res) - if err != nil { - return nil, err - } - if print { - fmt.Println(podList) - } - return pods, nil + fmt.Println(strings.TrimSpace(res)) } // RestartCluster restarts the k3s service on each node given -func RestartCluster(nodeNames []string) error { - for _, nodeName := range nodeNames { +func RestartCluster(nodes []VagrantNode) error { + for _, node := range nodes { cmd := "systemctl restart k3s* --all" - if _, err := RunCmdOnNode(cmd, nodeName); err != nil { + if _, err := node.RunCmdOnNode(cmd); err != nil { return err } } @@ -516,13 +539,13 @@ func RestartCluster(nodeNames []string) error { } // StartCluster starts the k3s service on each node given -func StartCluster(nodeNames []string) error { - for _, nodeName := range nodeNames { +func StartCluster(nodes []VagrantNode) error { + for _, node := range nodes { cmd := "systemctl start k3s" - if strings.Contains(nodeName, "agent") { + if strings.Contains(node.String(), "agent") { cmd += "-agent" } - if _, err := RunCmdOnNode(cmd, nodeName); err != nil { + if _, err := node.RunCmdOnNode(cmd); err != nil { return err } } @@ -530,10 +553,10 @@ func StartCluster(nodeNames []string) error { } // StopCluster starts the k3s service on each node given -func StopCluster(nodeNames []string) error { - for _, nodeName := range nodeNames { +func StopCluster(nodes []VagrantNode) error { + for _, node := range nodes { cmd := "systemctl stop k3s*" - if _, err := RunCmdOnNode(cmd, nodeName); err != nil { + if _, err := node.RunCmdOnNode(cmd); err != nil { return err } } @@ -541,18 +564,18 @@ func StopCluster(nodeNames []string) error { } // RunCmdOnNode executes a command from within the given node as sudo -func RunCmdOnNode(cmd string, nodename string) (string, error) { +func (v VagrantNode) RunCmdOnNode(cmd string) (string, error) { injectEnv := "" if _, ok := os.LookupEnv("E2E_GOCOVER"); ok && strings.HasPrefix(cmd, "k3s") { injectEnv = "GOCOVERDIR=/tmp/k3scov " } - runcmd := "vagrant ssh " + nodename + " -c \"sudo " + injectEnv + cmd + "\"" + runcmd := "vagrant ssh --no-tty " + v.String() + " -c \"sudo " + injectEnv + cmd + "\"" out, err := RunCommand(runcmd) // On GHA CI we see warnings about "[fog][WARNING] Unrecognized arguments: libvirt_ip_command" // these are added to the command output and need to be removed out = strings.ReplaceAll(out, "[fog][WARNING] Unrecognized arguments: libvirt_ip_command\n", "") if err != nil { - return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err) + return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, v.String(), out, err) } return out, nil } @@ -569,16 +592,16 @@ func RunCommand(cmd string) (string, error) { return string(out), err } -func UpgradeCluster(nodeNames []string, local bool) error { +func UpgradeCluster(nodes []VagrantNode, local bool) error { upgradeVersion := "E2E_RELEASE_CHANNEL=commit" if local { - if err := scpK3sBinary(nodeNames); err != nil { + if err := scpK3sBinary(nodes); err != nil { return err } upgradeVersion = "E2E_RELEASE_VERSION=skip" } - for _, nodeName := range nodeNames { - cmd := upgradeVersion + " vagrant provision " + nodeName + for _, node := range nodes { + cmd := upgradeVersion + " vagrant provision " + node.String() if out, err := RunCommand(cmd); err != nil { 
fmt.Println("Error Upgrading Cluster", out) return err @@ -587,16 +610,16 @@ func UpgradeCluster(nodeNames []string, local bool) error { return nil } -func GetCoverageReport(nodeNames []string) error { +func GetCoverageReport(nodes []VagrantNode) error { if os.Getenv("E2E_GOCOVER") == "" { return nil } covDirs := []string{} - for _, nodeName := range nodeNames { - covDir := nodeName + "-cov" + for _, node := range nodes { + covDir := node.String() + "-cov" covDirs = append(covDirs, covDir) os.MkdirAll(covDir, 0755) - cmd := "vagrant scp " + nodeName + ":/tmp/k3scov/* " + covDir + cmd := "vagrant scp " + node.String() + ":/tmp/k3scov/* " + covDir if _, err := RunCommand(cmd); err != nil { return err } @@ -627,19 +650,29 @@ func GetCoverageReport(nodeNames []string) error { return nil } -// getPodIPs returns the IPs of all pods +// GetDaemonsetReady returns the number of ready pods for the given daemonset +func GetDaemonsetReady(daemonset string, kubeConfigFile string) (int, error) { + cmd := "kubectl get ds " + daemonset + " -o jsonpath='{range .items[*]}{.status.numberReady}' --kubeconfig=" + kubeConfigFile + out, err := RunCommand(cmd) + if err != nil { + return 0, err + } + return strconv.Atoi(out) +} + +// GetPodIPs returns the IPs of all pods func GetPodIPs(kubeConfigFile string) ([]ObjIP, error) { cmd := `kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile return GetObjIPs(cmd) } -// getNodeIPs returns the IPs of all nodes +// GetNodeIPs returns the IPs of all nodes func GetNodeIPs(kubeConfigFile string) ([]ObjIP, error) { cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}' --kubeconfig=` + kubeConfigFile return GetObjIPs(cmd) } -// getObjIPs executes a command to collect IPs +// GetObjIPs executes a command to collect IPs func GetObjIPs(cmd string) ([]ObjIP, error) { var objIPs []ObjIP res, err := RunCommand(cmd) diff --git a/tests/e2e/token/token_test.go b/tests/e2e/token/token_test.go index 3b3c011d6ae7..ebfd5492898c 100644 --- a/tests/e2e/token/token_test.go +++ b/tests/e2e/token/token_test.go @@ -4,10 +4,10 @@ import ( "flag" "fmt" "os" - "strings" "testing" "time" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -33,11 +33,7 @@ func Test_E2EToken(t *testing.T) { RunSpecs(t, "SnapshotRestore Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -46,66 +42,54 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func() It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "360s", "5s").Should(Succeed()) + e2e.DumpPods(tc.KubeConfigFile) }) var permToken string It("Creates a permanent agent token", func() { permToken = "perage.s0xt4u0hl5guoyi6" - _, err := e2e.RunCmdOnNode("k3s token create --ttl=0 "+permToken, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("k3s token create --ttl=0 " + permToken) Expect(err).NotTo(HaveOccurred()) - res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s token list") Expect(err).NotTo(HaveOccurred()) Expect(res).To(MatchRegexp(`perage\s+\s+`)) }) It("Joins an agent with the permanent token", func() { cmd := fmt.Sprintf("echo 'token: %s' | sudo tee -a /etc/rancher/k3s/config.yaml > /dev/null", permToken) - _, err := e2e.RunCmdOnNode(cmd, agentNodeNames[0]) + _, err := tc.Agents[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) - _, err = e2e.RunCmdOnNode("systemctl start k3s-agent", agentNodeNames[0]) + _, err = tc.Agents[0].RunCmdOnNode("systemctl start k3s-agent") Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 1)) + 
g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 1)) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } @@ -114,38 +98,38 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func() }) Context("Agent joins with temporary token:", func() { It("Creates a 20s agent token", func() { - _, err := e2e.RunCmdOnNode("k3s token create --ttl=20s 20sect.jxnpve6vg8dqm895", serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("k3s token create --ttl=20s 20sect.jxnpve6vg8dqm895") Expect(err).NotTo(HaveOccurred()) - res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s token list") Expect(err).NotTo(HaveOccurred()) Expect(res).To(MatchRegexp(`20sect\s+[0-9]{2}s`)) }) It("Cleans up 20s token automatically", func() { Eventually(func() (string, error) { - return e2e.RunCmdOnNode("k3s token list", serverNodeNames[0]) + return tc.Servers[0].RunCmdOnNode("k3s token list") }, "25s", "5s").ShouldNot(ContainSubstring("20sect")) }) var tempToken string It("Creates a 10m agent token", func() { tempToken = "10mint.ida18trbbk43szwk" - _, err := e2e.RunCmdOnNode("k3s token create --ttl=10m "+tempToken, serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("k3s token create --ttl=10m " + tempToken) Expect(err).NotTo(HaveOccurred()) time.Sleep(2 * time.Second) - res, err := e2e.RunCmdOnNode("k3s token list", serverNodeNames[0]) + res, err := tc.Servers[0].RunCmdOnNode("k3s token list") Expect(err).NotTo(HaveOccurred()) Expect(res).To(MatchRegexp(`10mint\s+[0-9]m`)) }) It("Joins an agent with the 10m token", func() { cmd := fmt.Sprintf("echo 'token: %s' | sudo tee -a /etc/rancher/k3s/config.yaml > /dev/null", tempToken) - _, err := e2e.RunCmdOnNode(cmd, agentNodeNames[1]) + _, err := tc.Agents[1].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) - _, err = e2e.RunCmdOnNode("systemctl start k3s-agent", agentNodeNames[1]) + _, err = tc.Agents[1].RunCmdOnNode("systemctl start k3s-agent") Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 2)) + g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 2)) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } @@ -155,23 +139,23 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func() Context("Rotate server bootstrap token", func() { serverToken := "1234" It("Creates a new server token", func() { - Expect(e2e.RunCmdOnNode("k3s token rotate -t vagrant --new-token="+serverToken, serverNodeNames[0])). + Expect(tc.Servers[0].RunCmdOnNode("k3s token rotate -t vagrant --new-token=" + serverToken)). 
To(ContainSubstring("Token rotated, restart k3s nodes with new token")) }) It("Restarts servers with the new token", func() { cmd := fmt.Sprintf("sed -i 's/token:.*/token: %s/' /etc/rancher/k3s/config.yaml", serverToken) - for _, node := range serverNodeNames { - _, err := e2e.RunCmdOnNode(cmd, node) + for _, node := range tc.Servers { + _, err := node.RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) } - for _, node := range serverNodeNames { - _, err := e2e.RunCmdOnNode("systemctl restart k3s", node) + for _, node := range tc.Servers { + _, err := node.RunCmdOnNode("systemctl restart k3s") Expect(err).NotTo(HaveOccurred()) } Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 2)) + g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 2)) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } @@ -179,15 +163,15 @@ var _ = Describe("Use the token CLI to create and join agents", Ordered, func() }) It("Rejoins an agent with the new server token", func() { cmd := fmt.Sprintf("sed -i 's/token:.*/token: %s/' /etc/rancher/k3s/config.yaml", serverToken) - _, err := e2e.RunCmdOnNode(cmd, agentNodeNames[0]) + _, err := tc.Agents[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) - _, err = e2e.RunCmdOnNode("systemctl restart k3s-agent", agentNodeNames[0]) + _, err = tc.Agents[0].RunCmdOnNode("systemctl restart k3s-agent") Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(len(nodes)).Should(Equal(len(serverNodeNames) + 2)) + g.Expect(len(nodes)).Should(Equal(len(tc.Servers) + 2)) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } @@ -203,12 +187,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/upgradecluster/upgradecluster_test.go b/tests/e2e/upgradecluster/upgradecluster_test.go index fab93a6bbd89..8b248d28bb09 100644 --- a/tests/e2e/upgradecluster/upgradecluster_test.go +++ b/tests/e2e/upgradecluster/upgradecluster_test.go @@ -4,9 +4,9 @@ import ( "flag" "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -36,11 +36,7 @@ func Test_E2EUpgradeValidation(t *testing.T) { RunSpecs(t, "Upgrade Cluster Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -48,72 +44,61 @@ var _ = Describe("Verify Upgrade", Ordered, func() { Context("Cluster :", func() { It("Starts up with no issues", func() { var err error - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + tc.Hardened = *hardened + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) Expect(err).NotTo(HaveOccurred()) }) - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + It("Checks node and pod status", func() { + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) + e2e.DumpPods(tc.KubeConfigFile) }) It("Verifies ClusterIP Service", func() { - _, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("clusterip.yaml") Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed") - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) - clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false) + clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false) cmd = "curl -L --insecure http://" + clusterip + "/name.html" - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, nodeName) + return node.RunCmdOnNode(cmd) }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) } }) It("Verifies NodePort Service", func() { - _, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("nodeport.yaml") Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed") - for _, nodeName := range serverNodeNames { - nodeExternalIP, _ 
:= e2e.FetchNodeExternalIP(nodeName) - cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" + for _, node := range tc.Servers { + nodeExternalIP, _ := node.FetchNodeExternalIP() + cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" nodeport, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) - cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") @@ -127,15 +112,15 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }) It("Verifies LoadBalancer Service", func() { - _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("loadbalancer.yaml") Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed") - for _, nodeName := range serverNodeNames { - ip, _ := e2e.FetchNodeExternalIP(nodeName) - cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" + for _, node := range tc.Servers { + ip, _ := node.FetchNodeExternalIP() + cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" port, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) - cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer")) @@ -148,11 +133,11 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }) It("Verifies Ingress", func() { - _, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("ingress.yaml") Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") - for _, nodeName := range serverNodeNames { - ip, _ := e2e.FetchNodeExternalIP(nodeName) + for _, node := range tc.Servers { + ip, _ := node.FetchNodeExternalIP() cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html" Eventually(func() (string, error) { return e2e.RunCommand(cmd) @@ -161,42 +146,38 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }) It("Verifies Daemonset", func() { - _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("daemonset.yaml") Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") - nodes, _ := e2e.ParseNodes(kubeConfigFile, false) //nodes := + nodes, _ := e2e.ParseNodes(tc.KubeConfigFile, false) Eventually(func(g Gomega) { - pods, _ := e2e.ParsePods(kubeConfigFile, false) - count := e2e.CountOfStringInSlice("test-daemonset", pods) - fmt.Println("POD COUNT") - fmt.Println(count) - fmt.Println("NODE COUNT") - fmt.Println(len(nodes)) - g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count") + count, err := e2e.GetDaemonsetReady("test-daemonset", 
tc.KubeConfigFile) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count") }, "240s", "10s").Should(Succeed()) }) It("Verifies dns access", func() { - _, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("dnsutils.yaml") Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed") Eventually(func() (string, error) { - cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("dnsutils")) - cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" + cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" Eventually(func() (string, error) { return e2e.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local")) }) It("Verifies Local Path Provisioner storage ", func() { - _, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened) + _, err := tc.DeployWorkload("local-path-provisioner.yaml") Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed") Eventually(func(g Gomega) { - cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeConfigFile res, err := e2e.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred()) fmt.Println(res) @@ -205,7 +186,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }, "240s", "2s").Should(Succeed()) Eventually(func(g Gomega) { - cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile res, err := e2e.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred()) fmt.Println(res) @@ -214,25 +195,25 @@ var _ = Describe("Verify Upgrade", Ordered, func() { g.Expect(res).Should(ContainSubstring("Running")) }, "420s", "2s").Should(Succeed()) - cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'" + cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'" res, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) fmt.Println("Data stored in pvc: local-path-test") - cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile + cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeConfigFile res, err = e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) - _, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened) + _, err = tc.DeployWorkload("local-path-provisioner.yaml") Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed") Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("local-path-provisioner")) Eventually(func(g Gomega) { - cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pod volume-test 
--kubeconfig=" + tc.KubeConfigFile res, err := e2e.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred()) fmt.Println(res) @@ -242,72 +223,62 @@ var _ = Describe("Verify Upgrade", Ordered, func() { // Check data after re-creation Eventually(func() (string, error) { - cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test" + cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeConfigFile + " -- cat /data/test" return e2e.RunCommand(cmd) }, "180s", "2s").Should(ContainSubstring("local-path-test"), "Failed to retrieve data from pvc") }) It("Upgrades with no issues", func() { var err error - Expect(e2e.UpgradeCluster(append(serverNodeNames, agentNodeNames...), *local)).To(Succeed()) - Expect(e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.UpgradeCluster(append(tc.Servers, tc.Agents...), *local)).To(Succeed()) + Expect(e2e.RestartCluster(append(tc.Servers, tc.Agents...))).To(Succeed()) fmt.Println("CLUSTER UPGRADED") - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + tc.KubeConfigFile, err = e2e.GenKubeConfigFile(tc.Servers[0].String()) Expect(err).NotTo(HaveOccurred()) }) It("After upgrade Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") + By("Fetching Nodes status") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "420s", "5s").Should(Succeed()) - e2e.ParseNodes(kubeConfigFile, true) + e2e.ParseNodes(tc.KubeConfigFile, true) - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed")) - } else { - g.Expect(pod.Status).Should(Equal("Running")) - } - } - }, "420s", "5s").Should(Succeed()) - e2e.ParsePods(kubeConfigFile, true) + By("Fetching Pod status") + tests.AllPodsUp(tc.KubeConfigFile) + e2e.DumpPods(tc.KubeConfigFile) }) It("After upgrade verifies ClusterIP Service", func() { Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "420s", "5s").Should(ContainSubstring("test-clusterip")) - clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false) + clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false) cmd := "curl -L --insecure http://" + clusterip + "/name.html" fmt.Println(cmd) - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, nodeName) + return node.RunCmdOnNode(cmd) }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) } }) It("After upgrade verifies NodePort Service", func() { - for _, nodeName := range serverNodeNames { - nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName) - cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" + for _, node := range tc.Servers { + nodeExternalIP, _ := 
node.FetchNodeExternalIP() + cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" nodeport, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") @@ -320,9 +291,9 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }) It("After upgrade verifies LoadBalancer Service", func() { - for _, nodeName := range serverNodeNames { - ip, _ := e2e.FetchNodeExternalIP(nodeName) - cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" + for _, node := range tc.Servers { + ip, _ := node.FetchNodeExternalIP() + cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" port, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { @@ -331,15 +302,15 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }, "240s", "5s").Should(ContainSubstring("test-loadbalancer")) Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer")) } }) It("After upgrade verifies Ingress", func() { - for _, nodeName := range serverNodeNames { - ip, _ := e2e.FetchNodeExternalIP(nodeName) + for _, node := range tc.Servers { + ip, _ := node.FetchNodeExternalIP() cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html" fmt.Println(cmd) @@ -350,28 +321,24 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }) It("After upgrade verifies Daemonset", func() { - nodes, _ := e2e.ParseNodes(kubeConfigFile, false) //nodes := + nodes, _ := e2e.ParseNodes(tc.KubeConfigFile, false) Eventually(func(g Gomega) { - pods, _ := e2e.ParsePods(kubeConfigFile, false) - count := e2e.CountOfStringInSlice("test-daemonset", pods) - fmt.Println("POD COUNT") - fmt.Println(count) - fmt.Println("NODE COUNT") - fmt.Println(len(nodes)) - g.Expect(len(nodes)).Should(Equal(count), "Daemonset pod count does not match node count") - }, "420s", "1s").Should(Succeed()) + count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count") + }, "240s", "10s").Should(Succeed()) }) It("After upgrade verifies dns access", func() { Eventually(func() (string, error) { - cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" + cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" return e2e.RunCommand(cmd) }, "180s", "2s").Should((ContainSubstring("kubernetes.default.svc.cluster.local"))) }) It("After upgrade verify Local Path Provisioner storage ", func() { Eventually(func() (string, 
error) { - cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test" + cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeConfigFile + " -- cat /data/test" return e2e.RunCommand(cmd) }, "180s", "2s").Should(ContainSubstring("local-path-test")) }) @@ -385,12 +352,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/validatecluster/validatecluster_test.go b/tests/e2e/validatecluster/validatecluster_test.go index 2c4807cce98d..72ff3052f0d3 100644 --- a/tests/e2e/validatecluster/validatecluster_test.go +++ b/tests/e2e/validatecluster/validatecluster_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -35,11 +36,7 @@ func Test_E2EClusterValidation(t *testing.T) { RunSpecs(t, "Create Cluster Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) @@ -48,61 +45,51 @@ var _ = Describe("Verify Create", Ordered, func() { It("Starts up with no issues", func() { var err error if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) + tc.Hardened = *hardened + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) }) - It("Checks Node and Pod Status", func() { + It("Checks node and pod status", func() { fmt.Printf("\nFetching node status\n") Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { g.Expect(node.Status).Should(Equal("Ready")) } }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) + _, _ = e2e.ParseNodes(tc.KubeConfigFile, true) fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) - _, _ = 
+    e2e.DumpPods(tc.KubeConfigFile)
   })
   It("Verifies ClusterIP Service", func() {
-    res, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
+    res, err := tc.DeployWorkload("clusterip.yaml")
     Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res)
     Eventually(func(g Gomega) {
-      cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+      cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
       res, err := e2e.RunCommand(cmd)
       Expect(err).NotTo(HaveOccurred())
       g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: %q result: %s", cmd, res)
     }, "240s", "5s").Should(Succeed())
-    clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
+    clusterip, _ := e2e.FetchClusterIP(tc.KubeConfigFile, "nginx-clusterip-svc", false)
     cmd := "curl -L --insecure http://" + clusterip + "/name.html"
-    for _, nodeName := range serverNodeNames {
+    for _, node := range tc.Servers {
       Eventually(func(g Gomega) {
-        res, err := e2e.RunCmdOnNode(cmd, nodeName)
+        res, err := node.RunCmdOnNode(cmd)
         g.Expect(err).NotTo(HaveOccurred())
         Expect(res).Should(ContainSubstring("test-clusterip"))
       }, "120s", "10s").Should(Succeed())
@@ -110,17 +97,17 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
   It("Verifies NodePort Service", func() {
-    _, err := e2e.DeployWorkload("nodeport.yaml", kubeConfigFile, *hardened)
+    _, err := tc.DeployWorkload("nodeport.yaml")
     Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
-    for _, nodeName := range serverNodeNames {
-      nodeExternalIP, _ := e2e.FetchNodeExternalIP(nodeName)
-      cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
+    for _, node := range tc.Servers {
+      nodeExternalIP, _ := node.FetchNodeExternalIP()
+      cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
       nodeport, err := e2e.RunCommand(cmd)
       Expect(err).NotTo(HaveOccurred())
       Eventually(func(g Gomega) {
-        cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+        cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
         res, err := e2e.RunCommand(cmd)
         Expect(err).NotTo(HaveOccurred())
         g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
@@ -137,18 +124,18 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
   It("Verifies LoadBalancer Service", func() {
-    _, err := e2e.DeployWorkload("loadbalancer.yaml", kubeConfigFile, *hardened)
+    _, err := tc.DeployWorkload("loadbalancer.yaml")
     Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
-    for _, nodeName := range serverNodeNames {
-      ip, _ := e2e.FetchNodeExternalIP(nodeName)
+    for _, node := range tc.Servers {
+      ip, _ := node.FetchNodeExternalIP()
-      cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
+      cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
       port, err := e2e.RunCommand(cmd)
       Expect(err).NotTo(HaveOccurred())
       Eventually(func(g Gomega) {
-        cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+        cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile
         res, err := e2e.RunCommand(cmd)
         g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
         g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
@@ -164,11 +151,11 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
   It("Verifies Ingress", func() {
-    _, err := e2e.DeployWorkload("ingress.yaml", kubeConfigFile, *hardened)
+    _, err := tc.DeployWorkload("ingress.yaml")
     Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
-    for _, nodeName := range serverNodeNames {
-      ip, _ := e2e.FetchNodeExternalIP(nodeName)
+    for _, node := range tc.Servers {
+      ip, _ := node.FetchNodeExternalIP()
       cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
       fmt.Println(cmd)
@@ -181,35 +168,31 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
   It("Verifies Daemonset", func() {
-    _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
+    _, err := tc.DeployWorkload("daemonset.yaml")
     Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
-    nodes, _ := e2e.ParseNodes(kubeConfigFile, false)
+    nodes, _ := e2e.ParseNodes(tc.KubeConfigFile, false)
     Eventually(func(g Gomega) {
-      pods, _ := e2e.ParsePods(kubeConfigFile, false)
-      count := e2e.CountOfStringInSlice("test-daemonset", pods)
-      fmt.Println("POD COUNT")
-      fmt.Println(count)
-      fmt.Println("NODE COUNT")
-      fmt.Println(len(nodes))
-      g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
-    }, "420s", "10s").Should(Succeed())
+      count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile)
+      g.Expect(err).NotTo(HaveOccurred())
+      g.Expect(nodes).To(HaveLen(count), "Daemonset pod count does not match node count")
+    }, "240s", "10s").Should(Succeed())
   })
   It("Verifies dns access", func() {
-    _, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, *hardened)
+    _, err := tc.DeployWorkload("dnsutils.yaml")
     Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
     Eventually(func(g Gomega) {
-      cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
+      cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeConfigFile
       res, err := e2e.RunCommand(cmd)
       g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
       g.Expect(res).Should(ContainSubstring("dnsutils"))
     }, "420s", "2s").Should(Succeed())
     Eventually(func(g Gomega) {
-      cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
+      cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
       res, err := e2e.RunCommand(cmd)
       g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
@@ -218,11 +201,11 @@ var _ = Describe("Verify Create", Ordered, func() {
   })
   It("Verifies Local Path Provisioner storage ", func() {
-    res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
+    res, err := tc.DeployWorkload("local-path-provisioner.yaml")
     Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)
     Eventually(func(g Gomega) {
-      cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
+      cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeConfigFile
       res, err := e2e.RunCommand(cmd)
       g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
       g.Expect(res).Should(ContainSubstring("local-path-pvc"))
@@ -230,32 +213,32 @@ var _ = Describe("Verify Create", Ordered, func() {
     }, "420s", "2s").Should(Succeed())
     Eventually(func(g Gomega) {
-      cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
+      cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile
       res, err := e2e.RunCommand(cmd)
       g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
       g.Expect(res).Should(ContainSubstring("volume-test"))
       g.Expect(res).Should(ContainSubstring("Running"))
     }, "420s", "2s").Should(Succeed())
-    cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
+    cmd := "kubectl --kubeconfig=" + tc.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
     res, err = e2e.RunCommand(cmd)
     Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
-    cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
+    cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeConfigFile
     res, err = e2e.RunCommand(cmd)
     Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
-    _, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
+    _, err = tc.DeployWorkload("local-path-provisioner.yaml")
     Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
     Eventually(func(g Gomega) {
-      cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
+      cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeConfigFile
       res, _ := e2e.RunCommand(cmd)
       g.Expect(res).Should(ContainSubstring("local-path-provisioner"))
     }, "420s", "2s").Should(Succeed())
     Eventually(func(g Gomega) {
-      cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
+      cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeConfigFile
       res, err := e2e.RunCommand(cmd)
       g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
@@ -264,7 +247,7 @@ var _ = Describe("Verify Create", Ordered, func() {
     }, "420s", "2s").Should(Succeed())
     Eventually(func(g Gomega) {
-      cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
+      cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeConfigFile + " -- cat /data/test"
       res, err = e2e.RunCommand(cmd)
       g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res)
       fmt.Println("Data after re-creation", res)
@@ -275,67 +258,52 @@ var _ = Describe("Verify Create", Ordered, func() {
   Context("Validate restart", func() {
     It("Restarts normally", func() {
-      errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
+      errRestart := e2e.RestartCluster(append(tc.Servers, tc.Agents...))
       Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")
       Eventually(func(g Gomega) {
-        nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+        nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false)
         g.Expect(err).NotTo(HaveOccurred())
         for _, node := range nodes {
          g.Expect(node.Status).Should(Equal("Ready"))
        }
-        pods, _ := e2e.ParsePods(kubeConfigFile, false)
-        count := e2e.CountOfStringInSlice("test-daemonset", pods)
-        g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
-        podsRunningAr := 0
-        for _, pod := range pods {
-          if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
"test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" { - podsRunningAr++ - } - } - g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart") + count, err := e2e.GetDaemonsetReady("test-daemonset", tc.KubeConfigFile) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pods that are ready does not match node count") }, "620s", "5s").Should(Succeed()) }) }) Context("Valdiate Certificate Rotation", func() { It("Stops K3s and rotates certificates", func() { - errStop := e2e.StopCluster(serverNodeNames) + errStop := e2e.StopCluster(tc.Servers) Expect(errStop).NotTo(HaveOccurred(), "Cluster could not be stopped successfully") - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { cmd := "k3s certificate rotate" - _, err := e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully on "+nodeName) + _, err := node.RunCmdOnNode(cmd) + Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully on "+node.String()) } }) It("Start normally", func() { // Since we stopped all the server, we have to start 2 at once to get it back up // If we only start one at a time, the first will hang waiting for the second to be up - _, err := e2e.RunCmdOnNode("systemctl --no-block start k3s", serverNodeNames[0]) + _, err := tc.Servers[0].RunCmdOnNode("systemctl --no-block start k3s") Expect(err).NotTo(HaveOccurred()) - err = e2e.StartCluster(serverNodeNames[1:]) + err = e2e.StartCluster(tc.Servers[1:]) Expect(err).NotTo(HaveOccurred(), "Cluster could not be started successfully") Eventually(func(g Gomega) { - for _, nodeName := range serverNodeNames { + for _, node := range tc.Servers { cmd := "test ! 
-e /var/lib/rancher/k3s/server/tls/dynamic-cert-regenerate" - _, err := e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred(), "Dynamic cert regenerate file not removed on "+nodeName) + _, err := node.RunCmdOnNode(cmd) + Expect(err).NotTo(HaveOccurred(), "Dynamic cert regenerate file not removed on "+node.String()) } }, "620s", "5s").Should(Succeed()) - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) }, "620s", "5s").Should(Succeed()) }) @@ -354,21 +322,21 @@ var _ = Describe("Verify Create", Ordered, func() { "", } - for _, nodeName := range serverNodeNames { - grCert, errGrep := e2e.RunCmdOnNode(grepCert, nodeName) - Expect(errGrep).NotTo(HaveOccurred(), "TLS dirs could not be listed on "+nodeName) + for _, node := range tc.Servers { + grCert, errGrep := node.RunCmdOnNode(grepCert) + Expect(errGrep).NotTo(HaveOccurred(), "TLS dirs could not be listed on "+node.String()) re := regexp.MustCompile("tls-[0-9]+") tls := re.FindAllString(grCert, -1)[0] diff := fmt.Sprintf("diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+ "| grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls) - result, err := e2e.RunCmdOnNode(diff, nodeName) - Expect(err).NotTo(HaveOccurred(), "Certificate diff not created successfully on "+nodeName) + result, err := node.RunCmdOnNode(diff) + Expect(err).NotTo(HaveOccurred(), "Certificate diff not created successfully on "+node.String()) certArray := strings.Split(result, "\n") - Expect((certArray)).Should((Equal(expectResult)), "Certificate diff does not match the expected results on "+nodeName) + Expect((certArray)).Should((Equal(expectResult)), "Certificate diff does not match the expected results on "+node.String()) } - errRestartAgent := e2e.RestartCluster(agentNodeNames) + errRestartAgent := e2e.RestartCluster(tc.Agents) Expect(errRestartAgent).NotTo(HaveOccurred(), "Agent could not be restart successfully") }) @@ -382,12 +350,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(serverNodeNames, agentNodeNames...))) + AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, append(tc.Servers, tc.Agents...))) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/e2e/wasm/wasm_test.go b/tests/e2e/wasm/wasm_test.go index 237bc440d613..d6869cbb1a37 100644 --- a/tests/e2e/wasm/wasm_test.go +++ b/tests/e2e/wasm/wasm_test.go @@ -4,9 +4,9 @@ import ( "flag" "fmt" "os" - "strings" "testing" + "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -26,94 +26,84 @@ func Test_E2EWasm(t *testing.T) { RunSpecs(t, "Run WebAssenbly Workloads Test Suite", suiteConfig, reporterConfig) } -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) +var tc *e2e.TestConfig var _ = ReportAfterEach(e2e.GenReport) -var _ = Describe("Verify Can run Wasm workloads", Ordered, func() { - - It("Starts up with no issues", func() { - var err error - if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) - } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) - } - Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - }) - - // Server node needs to be ready before we continue - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) +var _ = Describe("Verify K3s can run Wasm workloads", Ordered, func() { + Context("Cluster comes up with Wasm configuration", func() { + It("Starts up with no issues", func() { + var err error + if *local { + tc, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) + } else { + tc, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + By("CLUSTER CONFIG") + By("OS: " + *nodeOS) + By(tc.Status()) + }) + + It("Checks node and pod status", func() { + By("Fetching Nodes status") + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(tc.KubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) } - } - }, "620s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) - }) + }, "620s", "5s").Should(Succeed()) + + By("Fetching pod status") + Eventually(func() error { + return tests.AllPodsUp(tc.KubeConfigFile) + }, "620s", "10s").Should(Succeed()) + Eventually(func() error { + return tests.CheckDefaultDeployments(tc.KubeConfigFile) + }, "300s", "10s").Should(Succeed()) + }) - It("Verify wasm-related containerd shims are installed", func() { - expected_shims := []string{"containerd-shim-spin-v2", "containerd-shim-slight-v1"} - for _, node := range append(serverNodeNames, agentNodeNames...) { - for _, shim := range expected_shims { - cmd := fmt.Sprintf("which %s", shim) - _, err := e2e.RunCmdOnNode(cmd, node) - Expect(err).NotTo(HaveOccurred()) + It("Verify wasm-related containerd shims are installed", func() { + expected_shims := []string{"containerd-shim-spin-v2", "containerd-shim-slight-v1"} + for _, node := range append(tc.Servers, tc.Agents...) 
{ + for _, shim := range expected_shims { + cmd := fmt.Sprintf("which %s", shim) + _, err := node.RunCmdOnNode(cmd) + Expect(err).NotTo(HaveOccurred()) + } } - } + }) }) Context("Verify Wasm workloads can run on the cluster", func() { It("Deploy Wasm workloads", func() { - out, err := e2e.DeployWorkload("wasm-workloads.yaml", kubeConfigFile, false) + out, err := tc.DeployWorkload("wasm-workloads.yaml") Expect(err).NotTo(HaveOccurred(), out) }) It("Wait for slight Pod to be up and running", func() { Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l app=wasm-slight --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l app=wasm-slight --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("pod/wasm-slight")) }) It("Wait for spin Pod to be up and running", func() { Eventually(func() (string, error) { - cmd := "kubectl get pods -o=name -l app=wasm-spin --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile + cmd := "kubectl get pods -o=name -l app=wasm-spin --field-selector=status.phase=Running --kubeconfig=" + tc.KubeConfigFile return e2e.RunCommand(cmd) }, "120s", "5s").Should(ContainSubstring("pod/wasm-spin")) }) It("Interact with Wasm applications", func() { - ingressIPs, err := e2e.FetchIngressIP(kubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - Expect(ingressIPs).To(HaveLen(1)) + var ingressIPs []string + var err error + Eventually(func(g Gomega) { + ingressIPs, err = e2e.FetchIngressIP(tc.KubeConfigFile) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(ingressIPs).To(HaveLen(1)) + }, "120s", "5s").Should(Succeed()) endpoints := []string{"slight/hello", "spin/go-hello", "spin/hello"} for _, endpoint := range endpoints { @@ -136,12 +126,12 @@ var _ = AfterEach(func() { var _ = AfterSuite(func() { if failed { - Expect(e2e.SaveJournalLogs(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.SaveJournalLogs(append(tc.Servers, tc.Agents...))).To(Succeed()) } else { - Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed()) + Expect(e2e.GetCoverageReport(append(tc.Servers, tc.Agents...))).To(Succeed()) } if !failed || *ci { Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) + Expect(os.Remove(tc.KubeConfigFile)).To(Succeed()) } }) diff --git a/tests/integration/cacertrotation/cacertrotation_int_test.go b/tests/integration/cacertrotation/cacertrotation_int_test.go index 31df7369a772..250ab5dc33f8 100644 --- a/tests/integration/cacertrotation/cacertrotation_int_test.go +++ b/tests/integration/cacertrotation/cacertrotation_int_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -36,7 +37,7 @@ var _ = Describe("ca certificate rotation", Ordered, func() { When("a new server is created", func() { It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) It("get certificate hash", func() { @@ -69,7 +70,7 @@ var _ = Describe("ca certificate rotation", Ordered, func() { }) It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "360s", "5s").Should(Succeed()) }) It("get certificate hash", func() { diff --git a/tests/integration/certrotation/certrotation_int_test.go b/tests/integration/certrotation/certrotation_int_test.go index 9574a6b02ec3..33349a9a4bea 100644 --- a/tests/integration/certrotation/certrotation_int_test.go +++ b/tests/integration/certrotation/certrotation_int_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -35,7 +36,7 @@ var _ = Describe("certificate rotation", Ordered, func() { When("a new server is created", func() { It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) It("get certificate hash", func() { @@ -61,7 +62,7 @@ var _ = Describe("certificate rotation", Ordered, func() { }) It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "360s", "5s").Should(Succeed()) }) It("checks the certificate status", func() { diff --git a/tests/integration/dualstack/dualstack_int_test.go b/tests/integration/dualstack/dualstack_int_test.go index 1e8198642e15..9b19e045d009 100644 --- a/tests/integration/dualstack/dualstack_int_test.go +++ b/tests/integration/dualstack/dualstack_int_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -40,7 +41,7 @@ var _ = Describe("dual stack", Ordered, func() { When("a ipv4 and ipv6 cidr is present", func() { It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "10s").Should(Succeed()) }) It("creates pods with two IPs", func() { diff --git a/tests/integration/etcdrestore/etcd_restore_int_test.go b/tests/integration/etcdrestore/etcd_restore_int_test.go index 5ea168d53237..1a4f200f60b4 100644 --- a/tests/integration/etcdrestore/etcd_restore_int_test.go +++ b/tests/integration/etcdrestore/etcd_restore_int_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -33,7 +34,7 @@ var _ = Describe("etcd snapshot restore", Ordered, func() { When("a snapshot is restored on existing node", func() { It("etcd starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) It("create a workload", func() { @@ -85,7 +86,7 @@ var _ = Describe("etcd snapshot restore", Ordered, func() { }) It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "360s", "5s").Should(Succeed()) }) It("make sure workload 1 exists", func() { diff --git a/tests/integration/etcdsnapshot/etcdsnapshot_int_test.go b/tests/integration/etcdsnapshot/etcdsnapshot_int_test.go index b4977639b60e..dc11b9e438d6 100644 --- a/tests/integration/etcdsnapshot/etcdsnapshot_int_test.go +++ b/tests/integration/etcdsnapshot/etcdsnapshot_int_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -40,7 +41,7 @@ var _ = Describe("etcd snapshots", Ordered, func() { When("a new etcd is created", func() { It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "10s").Should(Succeed()) }) It("saves an etcd snapshot", func() { @@ -130,7 +131,7 @@ var _ = Describe("etcd snapshots", Ordered, func() { server, err = testutil.K3sStartServer(localServerArgs...) Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) @@ -156,7 +157,7 @@ var _ = Describe("etcd snapshots", Ordered, func() { server, err = testutil.K3sStartServer(localServerArgs...) Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) diff --git a/tests/integration/flannelipv6masq/flannelipv6masq_int_test.go b/tests/integration/flannelipv6masq/flannelipv6masq_int_test.go index 793810ea64b0..7f776584016c 100644 --- a/tests/integration/flannelipv6masq/flannelipv6masq_int_test.go +++ b/tests/integration/flannelipv6masq/flannelipv6masq_int_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -41,7 +42,7 @@ var _ = Describe("flannel-ipv6-masq", Ordered, func() { When("a ipv4 and ipv6 cidr is present", func() { It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "10s").Should(Succeed()) }) It("creates pods with two IPs", func() { diff --git a/tests/integration/flannelnone/flannelnone_int_test.go b/tests/integration/flannelnone/flannelnone_int_test.go index 13156cce6aea..420734390d55 100644 --- a/tests/integration/flannelnone/flannelnone_int_test.go +++ b/tests/integration/flannelnone/flannelnone_int_test.go @@ -1,5 +1,5 @@ /* -This test verifies that even if we use flannel-backend=none, kube-api starts correctly so that it can +This test verifies that even if we use flannel-backend=none, kube-api starts correctly so that it can accept the custom CNI plugin manifest. We want to catch regressions in which kube-api is unresponsive. To do so we check for 25s that we can consistently query kube-api. We check that pods are in pending state, which is what should happen if there is no cni plugin @@ -14,8 +14,6 @@ import ( testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var server *testutil.K3sServer @@ -44,25 +42,24 @@ var _ = Describe("flannel-backend=none", Ordered, func() { It("checks pods status", func() { // Wait for pods to come up before running the real test Eventually(func() int { - pods, _ := testutil.ParsePods("kube-system", metav1.ListOptions{}) + pods, _ := testutil.ParsePodsInNS("kube-system") return len(pods) }, "180s", "5s").Should(BeNumerically(">", 0)) - - pods, err := testutil.ParsePods("kube-system", metav1.ListOptions{}) + pods, err := testutil.ParsePodsInNS("kube-system") Expect(err).NotTo(HaveOccurred()) // Pods should remain in Pending state because there is no network plugin - Consistently(func () bool { + Consistently(func() bool { for _, pod := range pods { if !strings.Contains(string(pod.Status.Phase), "Pending") { return false } } return true - }, "25s").Should(BeTrue()) - }) + }, "25s").Should(BeTrue()) }) + }) }) var failed bool diff --git a/tests/integration/integration.go b/tests/integration/integration.go index 6ff0f7cbb192..0c96d40af897 100644 --- a/tests/integration/integration.go +++ b/tests/integration/integration.go @@ -15,20 +15,20 @@ import ( "time" "github.com/k3s-io/k3s/pkg/flock" + "github.com/k3s-io/k3s/tests" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" ) // Compile-time variable var existingServer = "False" const lockFile = "/tmp/k3s-test.lock" +const DefaultConfig = "/etc/rancher/k3s/k3s.yaml" type K3sServer struct { cmd *exec.Cmd @@ -128,60 +128,8 @@ func K3sServerArgs() []string { return args } -// K3sDefaultDeployments checks if the default deployments for K3s are ready, otherwise returns an error -func K3sDefaultDeployments() error { - return CheckDeployments(metav1.NamespaceSystem, []string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}) -} - -// CheckDeployments checks if the provided list of deployments are ready, otherwise returns an error -func CheckDeployments(namespace string, deployments []string) error { - client, err := k8sClient() - if 
-    return err
-  }
-
-  for _, deploymentName := range deployments {
-    deployment, err := client.AppsV1().Deployments(namespace).Get(context.Background(), deploymentName, metav1.GetOptions{})
-    if err != nil {
-      return err
-    }
-    if deployment.Status.ReadyReplicas != deployment.Status.Replicas || deployment.Status.AvailableReplicas != deployment.Status.Replicas {
-      return fmt.Errorf("deployment %s not ready: replicas=%d readyReplicas=%d availableReplicas=%d",
-        deploymentName, deployment.Status.Replicas, deployment.Status.ReadyReplicas, deployment.Status.AvailableReplicas)
-    }
-  }
-
-  return nil
-}
-
-func ParsePods(namespace string, opts metav1.ListOptions) ([]corev1.Pod, error) {
-  clientSet, err := k8sClient()
-  if err != nil {
-    return nil, err
-  }
-  pods, err := clientSet.CoreV1().Pods(namespace).List(context.Background(), opts)
-  if err != nil {
-    return nil, err
-  }
-
-  return pods.Items, nil
-}
-
-func ParseNodes() ([]corev1.Node, error) {
-  clientSet, err := k8sClient()
-  if err != nil {
-    return nil, err
-  }
-  nodes, err := clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
-  if err != nil {
-    return nil, err
-  }
-
-  return nodes.Items, nil
-}
-
 func GetPod(namespace, name string) (*corev1.Pod, error) {
-  client, err := k8sClient()
+  client, err := tests.K8sClient(DefaultConfig)
   if err != nil {
     return nil, err
   }
@@ -189,7 +137,7 @@ func GetPod(namespace, name string) (*corev1.Pod, error) {
 }
 func GetPersistentVolumeClaim(namespace, name string) (*corev1.PersistentVolumeClaim, error) {
-  client, err := k8sClient()
+  client, err := tests.K8sClient(DefaultConfig)
   if err != nil {
     return nil, err
   }
@@ -197,7 +145,7 @@ func GetPersistentVolumeClaim(namespace, name string) (*corev1.PersistentVolumeC
 }
 func GetPersistentVolume(name string) (*corev1.PersistentVolume, error) {
-  client, err := k8sClient()
+  client, err := tests.K8sClient(DefaultConfig)
   if err != nil {
     return nil, err
   }
@@ -355,7 +303,7 @@ func K3sSaveLog(server *K3sServer, dump bool) error {
 }
 func GetEndpointsAddresses() (string, error) {
-  client, err := k8sClient()
+  client, err := tests.K8sClient(DefaultConfig)
   if err != nil {
     return "", err
   }
@@ -414,14 +362,15 @@ func unmountFolder(folder string) error {
   return nil
 }
-func k8sClient() (*kubernetes.Clientset, error) {
-  config, err := clientcmd.BuildConfigFromFlags("", "/etc/rancher/k3s/k3s.yaml")
+func ParsePodsInNS(namespace string) ([]corev1.Pod, error) {
+  clientSet, err := tests.K8sClient(DefaultConfig)
   if err != nil {
     return nil, err
   }
-  clientSet, err := kubernetes.NewForConfig(config)
+  pods, err := clientSet.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
   if err != nil {
     return nil, err
   }
-  return clientSet, nil
+
+  return pods.Items, nil
 }
diff --git a/tests/integration/kubeflags/kubeflags_test.go b/tests/integration/kubeflags/kubeflags_test.go
index 90f4baabab0d..67baee060245 100644
--- a/tests/integration/kubeflags/kubeflags_test.go
+++ b/tests/integration/kubeflags/kubeflags_test.go
@@ -4,6 +4,7 @@ import (
   "strings"
   "testing"
+  tests "github.com/k3s-io/k3s/tests"
   testutil "github.com/k3s-io/k3s/tests/integration"
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
@@ -126,7 +127,7 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {
       // Pods should not be healthy without kube-proxy
       Consistently(func() error {
-        return testutil.K3sDefaultDeployments()
+        return tests.CheckDefaultDeployments(testutil.DefaultConfig)
       }, "100s", "5s").Should(HaveOccurred())
     })
     It("should not find kube-proxy starting", func() {
@@ -178,7 +179,7 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {
       Expect(err).ToNot(HaveOccurred())
       Eventually(func() error {
-        return testutil.K3sDefaultDeployments()
+        return tests.CheckDefaultDeployments(testutil.DefaultConfig)
       }, "180s", "5s").Should(Succeed())
     })
diff --git a/tests/integration/localstorage/localstorage_int_test.go b/tests/integration/localstorage/localstorage_int_test.go
index f4ea258921ad..d7a898c8ebd0 100644
--- a/tests/integration/localstorage/localstorage_int_test.go
+++ b/tests/integration/localstorage/localstorage_int_test.go
@@ -7,6 +7,7 @@ import (
   "strings"
   "testing"
+  tests "github.com/k3s-io/k3s/tests"
   testutil "github.com/k3s-io/k3s/tests/integration"
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
@@ -35,7 +36,7 @@ var _ = Describe("local storage", Ordered, func() {
   When("a new local storage is created", func() {
     It("starts up with no problems", func() {
       Eventually(func() error {
-        return testutil.K3sDefaultDeployments()
+        return tests.CheckDefaultDeployments(testutil.DefaultConfig)
       }, "120s", "5s").Should(Succeed())
     })
     It("creates a new pvc", func() {
diff --git a/tests/integration/longhorn/longhorn_int_test.go b/tests/integration/longhorn/longhorn_int_test.go
index e4c475e71605..2fecd7a936a9 100644
--- a/tests/integration/longhorn/longhorn_int_test.go
+++ b/tests/integration/longhorn/longhorn_int_test.go
@@ -6,10 +6,10 @@ import (
   "strings"
   "testing"
+  tests "github.com/k3s-io/k3s/tests"
   testutil "github.com/k3s-io/k3s/tests/integration"
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 var server *testutil.K3sServer
@@ -38,7 +38,7 @@ var _ = Describe("longhorn", Ordered, func() {
   When("a new cluster is created", func() {
     It("starts up with no problems", func() {
       Eventually(func() error {
-        return testutil.K3sDefaultDeployments()
+        return tests.CheckDefaultDeployments(testutil.DefaultConfig)
       }, "120s", "5s").Should(Succeed())
     })
   })
@@ -57,7 +57,7 @@ var _ = Describe("longhorn", Ordered, func() {
     })
     It("starts the longhorn pods with no problems", func() {
       Eventually(func() error {
-        pods, err := testutil.ParsePods("longhorn-system", metav1.ListOptions{})
+        pods, err := testutil.ParsePodsInNS("longhorn-system")
         if err != nil {
           return err
         }
diff --git a/tests/integration/secretsencryption/secretsencryption_int_test.go b/tests/integration/secretsencryption/secretsencryption_int_test.go
index 247f9d714ec1..30126a77ac8a 100644
--- a/tests/integration/secretsencryption/secretsencryption_int_test.go
+++ b/tests/integration/secretsencryption/secretsencryption_int_test.go
@@ -6,6 +6,7 @@ import (
   "testing"
   "time"
+  tests "github.com/k3s-io/k3s/tests"
   testutil "github.com/k3s-io/k3s/tests/integration"
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
"github.com/onsi/gomega" @@ -35,7 +36,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() { When("A server starts with secrets encryption", func() { It("starts up with no problems", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) It("it creates a encryption key", func() { @@ -66,7 +67,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() { secretsEncryptionServer, err = testutil.K3sStartServer(secretsEncryptionServerArgs...) Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) Eventually(func() (string, error) { return testutil.K3sCmd("secrets-encrypt status -d", secretsEncryptionDataDir) @@ -91,7 +92,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() { secretsEncryptionServer, err = testutil.K3sStartServer(secretsEncryptionServerArgs...) Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) Eventually(func() (string, error) { @@ -128,7 +129,7 @@ var _ = Describe("secrets encryption rotation", Ordered, func() { secretsEncryptionServer, err = testutil.K3sStartServer(secretsEncryptionServerArgs...) Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) time.Sleep(10 * time.Second) }) diff --git a/tests/integration/startup/startup_int_test.go b/tests/integration/startup/startup_int_test.go index bc339f8d4a57..111328a8cc95 100644 --- a/tests/integration/startup/startup_int_test.go +++ b/tests/integration/startup/startup_int_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "testing" + tests "github.com/k3s-io/k3s/tests" testutil "github.com/k3s-io/k3s/tests/integration" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -37,7 +38,7 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "5s").Should(Succeed()) }) It("has kine without tls", func() { @@ -78,7 +79,7 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "5s").Should(Succeed()) }) It("set kine to use tls", func() { @@ -107,7 +108,7 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "5s").Should(Succeed()) }) It("dies cleanly", func() { @@ -124,9 +125,9 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods without traefik deployed", func() { Eventually(func() error { - return testutil.CheckDeployments("kube-system", []string{"coredns", "local-path-provisioner", "metrics-server"}) + return tests.CheckDeployments([]string{"coredns", "local-path-provisioner", "metrics-server"}, testutil.DefaultConfig) }, "90s", "10s").Should(Succeed()) - nodes, err := testutil.ParseNodes() + nodes, err := tests.ParseNodes(testutil.DefaultConfig) Expect(err).NotTo(HaveOccurred()) Expect(nodes).To(HaveLen(1)) }) @@ -156,10 +157,10 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the node deployed with correct IPs", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "10s").Should(Succeed()) - nodes, err := testutil.ParseNodes() + nodes, err := tests.ParseNodes(testutil.DefaultConfig) Expect(err).NotTo(HaveOccurred()) Expect(nodes).To(HaveLen(1)) Expect(nodes[0].Status.Addresses).To(ContainElements([]v1.NodeAddress{ @@ -201,9 +202,9 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "5s").Should(Succeed()) - nodes, err := testutil.ParseNodes() + nodes, err := tests.ParseNodes(testutil.DefaultConfig) Expect(err).NotTo(HaveOccurred()) Expect(nodes).To(HaveLen(1)) }) @@ -229,16 +230,16 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "5s").Should(Succeed()) - nodes, err := testutil.ParseNodes() + nodes, err := tests.ParseNodes(testutil.DefaultConfig) Expect(err).NotTo(HaveOccurred()) Expect(nodes).To(HaveLen(1)) }) var nodes []v1.Node It("has a custom node name with id appended", func() { var err error - nodes, err = testutil.ParseNodes() + nodes, err = tests.ParseNodes(testutil.DefaultConfig) Expect(err).NotTo(HaveOccurred()) Expect(nodes).To(HaveLen(1)) Expect(nodes[0].Name).To(MatchRegexp(`-[0-9a-f]*`)) @@ -264,9 +265,9 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, 
"120s", "5s").Should(Succeed()) - nodes, err := testutil.ParseNodes() + nodes, err := tests.ParseNodes(testutil.DefaultConfig) Expect(err).NotTo(HaveOccurred()) Expect(nodes).To(HaveLen(1)) }) @@ -285,7 +286,7 @@ var _ = Describe("startup tests", Ordered, func() { }) It("has the default pods deployed", func() { Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "120s", "5s").Should(Succeed()) }) It("creates a new pod", func() { @@ -301,7 +302,7 @@ var _ = Describe("startup tests", Ordered, func() { startupServer, err = testutil.K3sStartServer(startupServerArgs...) Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - return testutil.K3sDefaultDeployments() + return tests.CheckDefaultDeployments(testutil.DefaultConfig) }, "180s", "5s").Should(Succeed()) }) It("has the dummy pod not restarted", func() {