From 91cd55021d21e632d0f5b181588e47d572201aae Mon Sep 17 00:00:00 2001
From: Shyamsundar Ranganathan
Date: Fri, 22 Mar 2024 08:38:22 -0400
Subject: [PATCH 01/10] Set envtest version rather than use latest

latest moved to go 1.22 (for k8s 1.30 requirements), causing envtest
failures locally. Set the version to use, thereby avoiding surprises.

Signed-off-by: Shyamsundar Ranganathan
---
 hack/install-setup-envtest.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hack/install-setup-envtest.sh b/hack/install-setup-envtest.sh
index f199d3dab..6d1c30cc4 100755
--- a/hack/install-setup-envtest.sh
+++ b/hack/install-setup-envtest.sh
@@ -3,7 +3,7 @@ set -e
 
 script_dir="$(cd "$(dirname "$0")" && pwd)"
 
-required_version="latest"
+required_version="release-0.17"
 source_url="sigs.k8s.io/controller-runtime/tools/setup-envtest@${required_version}"
 target_dir="${script_dir}/../testbin"
 target_path="${target_dir}/setup-envtest"

From baa03a39d770f738ac45c2bb315a6a23ade550e8 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Wed, 13 Mar 2024 16:15:58 +0200
Subject: [PATCH 02/10] Build the ramen container only when we need it

Building the container is fast, but there is no point to build it
before we need it. If the build fails in an earlier step, this just
adds noise to the build and wastes resources.
Signed-off-by: Nir Soffer
---
 .github/workflows/e2e.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index 74025d93a..e05ea9b96 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -30,9 +30,6 @@ jobs:
     - name: Install ramenctl
       run: pip install -e ramenctl
 
-    - name: Build ramen-operator container
-      run: make docker-build
-
     - name: Delete clusters
       if: ${{ always() }}
       working-directory: test
@@ -50,6 +47,9 @@ jobs:
         cd test
         drenv start --max-workers ${{ env.MAX_WORKERS }} --name-prefix ${{ env.NAME_PREFIX }} envs/regional-dr.yaml
 
+    - name: Build ramen-operator container
+      run: make docker-build
+
     - name: Deploy ramen
       run: ramenctl deploy --name-prefix ${{ env.NAME_PREFIX }} test/envs/regional-dr.yaml

From 5f64da9c3cb3f307bdc099e53addea90f2a64ec0 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Wed, 13 Mar 2024 16:18:12 +0200
Subject: [PATCH 03/10] Remove pointless docker-build

The e2e-rdr target was depending on docker-build, so when we run it we
rebuild the container. This is fast since we already built the
container before deploying it with `ramenctl deploy`, but it is
pointless to run this before each test run because the built container
is not deployed to the clusters.

Rebuilding the container and pushing it to the clusters when running
the tests is a slow process so it is best done manually only when
needed.

When we have working e2e tests and more experience with using it we
can consider optimizing the workflow.

Signed-off-by: Nir Soffer
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 2b789b5ec..317fefb27 100644
--- a/Makefile
+++ b/Makefile
@@ -183,7 +183,7 @@ test-drenv: ## Run drenv tests.
 test-ramenctl: ## Run ramenctl tests.
 	$(MAKE) -C ramenctl
 
-e2e-rdr: generate manifests docker-build ## Run rdr-e2e tests.
+e2e-rdr: generate manifests ## Run rdr-e2e tests.
./e2e/rdr-e2e.sh coverage: From 46f22ff9b4d7770c53b01950dc6806932eb747c6 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Mon, 18 Mar 2024 00:28:36 +0200 Subject: [PATCH 04/10] Disable checks for ceph osd blocklist They were added because the blocklist looked wrong, and I suspected that failures were caused by the blocked client. However if we disable the blocklist checks the rbd-mirror test works fine. In the CI lab, these checks are the most common failure reason. Testing show 50% decrease in number of failures. run cpus runs passed failed success(%) time(s) ------------------------------------------------------- before 4 50 39 11 78 26879 after 4 50 44 6 88 28571 The drenv.ceph module is unused now, but plan to use it soon for gathering ceph state on errors, so I kept it. We can delete the clear_blocklist() function but it is tiny so I kept it as well. Signed-off-by: Nir Soffer --- test/addons/rbd-mirror/start | 29 ----------------------------- test/addons/rbd-mirror/test | 16 ---------------- 2 files changed, 45 deletions(-) diff --git a/test/addons/rbd-mirror/start b/test/addons/rbd-mirror/start index e49895330..2bc073b52 100755 --- a/test/addons/rbd-mirror/start +++ b/test/addons/rbd-mirror/start @@ -9,34 +9,11 @@ import os import sys import drenv -from drenv import ceph from drenv import kubectl POOL_NAME = "replicapool" -def clear_blocklist(cluster): - """ - Clear ceph blocklist. - - TODO: Maybe it is better to fail. - """ - blocklist = ceph.list_osd_blocklist(cluster) - if blocklist: - print(f"Clearing ceph osd blocklist on cluster {cluster}") - print(json.dumps(blocklist, indent=2)) - ceph.clear_osd_blocklist(cluster) - - -def check_blocklist(cluster): - """ - Fail if ceph osd blocklist is not empty. 
- """ - blocklist = ceph.list_osd_blocklist(cluster) - if blocklist: - raise RuntimeError(f"Ceph blocklist on cluster {cluster}: {blocklist}") - - def fetch_secret_info(cluster): info = {} @@ -152,9 +129,6 @@ os.chdir(os.path.dirname(__file__)) cluster1 = sys.argv[1] cluster2 = sys.argv[2] -clear_blocklist(cluster1) -clear_blocklist(cluster2) - cluster1_info = fetch_secret_info(cluster1) cluster2_info = fetch_secret_info(cluster2) @@ -170,7 +144,4 @@ wait_until_pool_mirroring_is_healthy(cluster2) deploy_vrc_sample(cluster1) deploy_vrc_sample(cluster2) -check_blocklist(cluster1) -check_blocklist(cluster2) - print("Mirroring was setup successfully") diff --git a/test/addons/rbd-mirror/test b/test/addons/rbd-mirror/test index de4dc4575..986602e1b 100755 --- a/test/addons/rbd-mirror/test +++ b/test/addons/rbd-mirror/test @@ -8,22 +8,12 @@ import os import sys import time -from drenv import ceph from drenv import kubectl POOL_NAME = "replicapool" PVC_NAME = "rbd-pvc" -def check_blocklist(cluster): - """ - Fail if ceph osd blocklist is not empty. - """ - blocklist = ceph.list_osd_blocklist(cluster) - if blocklist: - raise RuntimeError(f"Ceph blocklist on cluster {cluster}: {blocklist}") - - def rbd(*args, cluster=None): """ Run a rbd command using the ceph toolbox on the specified cluster and @@ -174,11 +164,5 @@ os.chdir(os.path.dirname(__file__)) cluster1 = sys.argv[1] cluster2 = sys.argv[2] -check_blocklist(cluster1) -check_blocklist(cluster2) - test_volume_replication(cluster1, cluster2) test_volume_replication(cluster2, cluster1) - -check_blocklist(cluster1) -check_blocklist(cluster2) From c1d3baf0f11b76bd7f50686f4ea87bd0cf102ae0 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Tue, 26 Mar 2024 16:23:29 +0530 Subject: [PATCH 05/10] Add kubevirt configs for OpenShift Add OpenShift test configuration for all VM samples. With this change you can test all kubevirt vms also on OpenShift environment. 
Notes:
- If your clusterset name is not "submariner", you need to fork the
  samples repo and modify the subscription kustomization.
- You may need to modify the dr_policy name to match the cluster
  drpolicy or add a new drpolicy.
- vm-dv and vm-dvt require current upstream kubevirt and cdi.

We can test now only basic-test/{deploy,enable-dr,disable-dr,undeploy}.

Signed-off-by: Nir Soffer
---
 test/configs/kubevirt/vm-dv-odr-regional.yaml  | 11 +++++++++++
 test/configs/kubevirt/vm-dvt-odr-regional.yaml | 11 +++++++++++
 test/configs/kubevirt/vm-pvc-odr-regional.yaml | 11 +++++++++++
 3 files changed, 33 insertions(+)
 create mode 100644 test/configs/kubevirt/vm-dv-odr-regional.yaml
 create mode 100644 test/configs/kubevirt/vm-dvt-odr-regional.yaml
 create mode 100644 test/configs/kubevirt/vm-pvc-odr-regional.yaml

diff --git a/test/configs/kubevirt/vm-dv-odr-regional.yaml b/test/configs/kubevirt/vm-dv-odr-regional.yaml
new file mode 100644
index 000000000..81ab82c06
--- /dev/null
+++ b/test/configs/kubevirt/vm-dv-odr-regional.yaml
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+---
+repo: https://github.com/ramendr/ocm-ramen-samples.git
+path: subscription/kubevirt/vm-dv-odr-regional
+branch: main
+name: vm-dv
+namespace: vm-dv
+dr_policy: dr-policy
+pvc_label: vm

diff --git a/test/configs/kubevirt/vm-dvt-odr-regional.yaml b/test/configs/kubevirt/vm-dvt-odr-regional.yaml
new file mode 100644
index 000000000..0b8e676df
--- /dev/null
+++ b/test/configs/kubevirt/vm-dvt-odr-regional.yaml
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+---
+repo: https://github.com/ramendr/ocm-ramen-samples.git
+path: subscription/kubevirt/vm-dvt-odr-regional
+branch: main
+name: vm-dvt
+namespace: vm-dvt
+dr_policy: dr-policy
+pvc_label: vm

diff --git a/test/configs/kubevirt/vm-pvc-odr-regional.yaml b/test/configs/kubevirt/vm-pvc-odr-regional.yaml
new file mode 100644
index
000000000..c51a7490b
--- /dev/null
+++ b/test/configs/kubevirt/vm-pvc-odr-regional.yaml
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+---
+repo: https://github.com/ramendr/ocm-ramen-samples.git
+path: subscription/kubevirt/vm-pvc-odr-regional
+branch: main
+name: vm-pvc
+namespace: vm-pvc
+dr_policy: dr-policy
+pvc_label: vm

From 60502eaf9e73ccb5926e43a2455e2c97840a6f69 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Tue, 26 Mar 2024 19:57:21 +0530
Subject: [PATCH 06/10] Add example odr environment file

This example can be used with basic-test to run basic tests using
ocm-ramen-samples on an OpenShift cluster:

    cp envs/odr.yaml .

    # modify cluster names if needed
    edit odr.yaml

    basic-test/run -c configs/kubevirt/vm-pvc-odr-regional odr.yaml

Signed-off-by: Nir Soffer
---
 test/envs/odr.yaml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 test/envs/odr.yaml

diff --git a/test/envs/odr.yaml b/test/envs/odr.yaml
new file mode 100644
index 000000000..f0b4c97ba
--- /dev/null
+++ b/test/envs/odr.yaml
@@ -0,0 +1,14 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+# Environment for testing an external OpenShift Regional-DR setup using
+# basic-test. Assuming that you used `oc login` to get all the clusters in a
+# kubeconfig file used by the test.
+---
+name: perf123
+ramen:
+  hub: perf1
+  clusters: [perf2, perf3]
+  topology: regional-dr
+  features:
+    volsync: true

From 68a60632aba94353f2fa4e149afa934d6338d6a6 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Wed, 20 Mar 2024 03:00:25 +0200
Subject: [PATCH 07/10] Add drenv stress test

The test runs drenv start jobs for collecting stats and debugging
failures.
Signed-off-by: Nir Soffer --- test/stress-test/README.md | 136 ++++++++++++++++++++++++++++++++ test/stress-test/run | 155 +++++++++++++++++++++++++++++++++++++ 2 files changed, 291 insertions(+) create mode 100644 test/stress-test/README.md create mode 100755 test/stress-test/run diff --git a/test/stress-test/README.md b/test/stress-test/README.md new file mode 100644 index 000000000..e777064e7 --- /dev/null +++ b/test/stress-test/README.md @@ -0,0 +1,136 @@ +# drenv stress test + +This directory includes the drenv stress test for evaluating `drenv start` +robustness and debugging failed runs. + +The test support 2 modes of operation: + +- Collecting stats from long unattended test run +- Debugging a failed run + +## Collecting stats + +In this example we run 100 runs starting the regional-dr environment. If +a run fails, we delete the clusters and continue. This is useful for +understanding what are the most common failures. + +``` +stress-test/run -r 100 ../envs/regional-dr.yaml +``` + +This creates the `out` directory in the current directory, logging each +run in a separate log file, and saving test results in `test.json` file. 
+This run took more than 17 hours (626 seconds per build): + +``` +$ ls out +000.log 013.log 026.log 039.log 052.log 065.log 078.log 091.log +001.log 014.log 027.log 040.log 053.log 066.log 079.log 092.log +002.log 015.log 028.log 041.log 054.log 067.log 080.log 093.log +003.log 016.log 029.log 042.log 055.log 068.log 081.log 094.log +004.log 017.log 030.log 043.log 056.log 069.log 082.log 095.log +005.log 018.log 031.log 044.log 057.log 070.log 083.log 096.log +006.log 019.log 032.log 045.log 058.log 071.log 084.log 097.log +007.log 020.log 033.log 046.log 059.log 072.log 085.log 098.log +008.log 021.log 034.log 047.log 060.log 073.log 086.log 099.log +009.log 022.log 035.log 048.log 061.log 074.log 087.log test.json +010.log 023.log 036.log 049.log 062.log 075.log 088.log +011.log 024.log 037.log 050.log 063.log 076.log 089.log +012.log 025.log 038.log 051.log 064.log 077.log 090.log +``` + +To get test stats: + +``` +$ cat out/test.json | jq .stats +{ + "runs": 100, + "passed": 84, + "failed": 16, + "success": 84.0, + "time": 62694.784591522985, + "passed-time": 52647.00043984903, + "failed-time": 9723.622515744006 +} +``` + +To find the failed runs you can use look up the individual tests +results: + +``` +$ cat out/test.json +... 
+ { + "name": "007", + "passed": false, + "time": 460.2368865620083 + }, +``` + +Or grep the logs: + +``` +$ grep ^drenv.commands.Error out/*.log +out/007.log:drenv.commands.Error: Command failed: +out/014.log:drenv.commands.Error: Command failed: +out/026.log:drenv.commands.Error: Command failed: +out/027.log:drenv.commands.Error: Command failed: +out/028.log:drenv.commands.Error: Command failed: +out/031.log:drenv.commands.Error: Command failed: +out/036.log:drenv.commands.Error: Command failed: +out/043.log:drenv.commands.Error: Command failed: +out/044.log:drenv.commands.Error: Command failed: +out/051.log:drenv.commands.Error: Command failed: +out/052.log:drenv.commands.Error: Command failed: +out/066.log:drenv.commands.Error: Command failed: +out/074.log:drenv.commands.Error: Command failed: +out/075.log:drenv.commands.Error: Command failed: +out/085.log:drenv.commands.Error: Command failed: +out/089.log:drenv.commands.Error: Command failed: +``` + +## Debugging a failed run + +In this mode the run exit cleanly after the first failure, leaving the +cluster running for inspection. + +``` +stress-test/run -r 100 -x ../envs/regional-dr.yaml +``` + +Because the failures are random, a run may fail very quickly or only +after many hours. As drenv becomes more reliable debugging random +failures will become harder. + +> [!IMPORTANT] +> After debugging the failure, you need to delete the environment +> manually. 
+ +In this example the run failed after the first run: + +``` +$ ls out.3: +out.3: +000.log test.json +``` + +And here after 20 runs: + +``` +$ ls out.4: +000.log 003.log 006.log 009.log 012.log 015.log 018.log 021.log +001.log 004.log 007.log 010.log 013.log 016.log 019.log test.json +002.log 005.log 008.log 011.log 014.log 017.log 020.log +``` + +In both case the last run is the failure: + +``` +$ grep ^drenv.commands.Error out.[34]/*.log +out.3/000.log:drenv.commands.Error: Command failed: +out.4/021.log:drenv.commands.Error: Command failed: +``` + +The clusters are running, hopefully in the same state when the run +failed. Sometimes the cluster fixed itself after the failure, this +usually means some timeout was too short. diff --git a/test/stress-test/run b/test/stress-test/run new file mode 100755 index 000000000..ed3c77129 --- /dev/null +++ b/test/stress-test/run @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 + +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import argparse +import json +import logging +import os +import subprocess +import sys +import time + + +def main(): + args = parse_args() + + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(message)s", + ) + + logging.info("Writing output to %s", args.outdir) + os.mkdir(args.outdir) + + results = [] + stats = { + "runs": 0, + "passed": 0, + "failed": 0, + "success": 0.0, + "time": 0.0, + "passed-time": 0.0, + "failed-time": 0.0, + } + + start = time.monotonic() + + for i in range(args.runs): + name = f"{i:03d}" + logging.info("[%s] Started", name) + r = run(name, args) + logging.info( + "[%s] %s in %.1f seconds", + name, + "PASSED" if r["passed"] else "FAILED", + r["time"], + ) + results.append(r) + + stats["runs"] += 1 + + if r["passed"]: + stats["passed"] += 1 + stats["passed-time"] += r["time"] + else: + stats["failed"] += 1 + stats["failed-time"] += r["time"] + + if not r["passed"] and args.exit_first: + break + + stats["time"] = 
time.monotonic() - start + stats["success"] = stats["passed"] / stats["runs"] * 100 + + logging.info( + "%d passed, %d failed (%.1f%%) in %.1fs", + stats["passed"], + stats["failed"], + stats["success"], + stats["time"], + ) + + write_output(args, results, stats) + + +def parse_args(): + p = argparse.ArgumentParser() + p.add_argument( + "-r", + "--runs", + type=int, + default=1, + help="number of runs (default 1)", + ) + p.add_argument( + "-o", + "--outdir", + default="out", + help="directroy for storing test output (default out)", + ) + p.add_argument( + "-x", + "--exit-first", + action="store_true", + help="exit on first failure without deleting the clusters", + ) + p.add_argument( + "--name-prefix", + help="prefix profile names", + ) + p.add_argument( + "envfile", + help="path to environment file", + ) + return p.parse_args() + + +def write_output(args, results, stats): + test = { + "config": { + "runs": args.runs, + "envfile": args.envfile, + "exit-first": args.exit_first, + "name-prefix": args.name_prefix, + }, + "results": results, + "stats": stats, + } + + test_file = os.path.join(args.outdir, "test.json") + with open(test_file, "w") as f: + json.dump(test, f, indent=2) + f.write("\n") + + +def run(name, args): + log = os.path.join(args.outdir, name + ".log") + + start = time.monotonic() + cp = drenv("start", args.envfile, log, name_prefix=args.name_prefix) + elapsed = time.monotonic() - start + passed = cp.returncode == 0 + + if passed or not args.exit_first: + drenv("delete", args.envfile, log, name_prefix=args.name_prefix, check=True) + + return { + "name": name, + "passed": passed, + "time": elapsed, + } + + +def drenv(command, envfile, log, name_prefix=None, check=False): + cmd = ["drenv", command, "--verbose"] + if name_prefix: + cmd.extend(("--name-prefix", name_prefix)) + cmd.append(envfile) + with open(log, "a") as f: + return subprocess.run(cmd, stderr=f, check=check) + + +if __name__ == "__main__": + main() From 
cc0d60b460fa7895f07443c69ba5e39540ffdd75 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Fri, 22 Mar 2024 19:41:36 +0200 Subject: [PATCH 08/10] Create drenv specific libvirt default network When running drenv in a libvirt VM using the default network (192.168.122.1/24) on the host, we cannot use same network inside the guest. Use drenv specific network (192.168.22.1/25) instead. This can be fail if the network is used, bu this is unlikely. Fixes #1297 Signed-off-by: Nir Soffer --- test/scripts/network.xml | 10 ++++++++++ test/scripts/setup-libvirt | 4 +++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 test/scripts/network.xml diff --git a/test/scripts/network.xml b/test/scripts/network.xml new file mode 100644 index 000000000..15f87ebdf --- /dev/null +++ b/test/scripts/network.xml @@ -0,0 +1,10 @@ + + default + + + + + + + + diff --git a/test/scripts/setup-libvirt b/test/scripts/setup-libvirt index ae98fe1c4..026a62c05 100755 --- a/test/scripts/setup-libvirt +++ b/test/scripts/setup-libvirt @@ -1,11 +1,13 @@ #!/bin/sh -e +base=$(dirname $0) + default_network_exists() { virsh -c qemu:///system net-list | grep -q default } create_default_network() { - virsh -c qemu:///system net-define /usr/share/libvirt/networks/default.xml + virsh -c qemu:///system net-define $base/network.xml virsh -c qemu:///system net-autostart default virsh -c qemu:///system net-start default } From 61e88f294650075c10cdf5b19b014d650e0af648 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Fri, 22 Mar 2024 20:14:40 +0200 Subject: [PATCH 09/10] Upgrade csi-addon to 0.8.0 Looking at cluster events $ kubectl events -A --context dr1 | grep 'pulled image "quay.io/csiaddons/' csi-addons-system ... Successfully pulled image "quay.io/csiaddons/k8s-controller:v0.7.0" ... ... Successfully pulled image "quay.io/csiaddons/k8s-sidecar:v0.8.0" ... we were mixing k8s-controller:v0.7.0 and k8s-sidecar:v0.8.0. 
Fixes #1257 Signed-off-by: Nir Soffer --- test/addons/csi-addons/start | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/addons/csi-addons/start b/test/addons/csi-addons/start index 90ea5776a..ef2a0b0cf 100755 --- a/test/addons/csi-addons/start +++ b/test/addons/csi-addons/start @@ -9,7 +9,7 @@ import sys import drenv from drenv import kubectl -VERSION = "v0.7.0" +VERSION = "v0.8.0" BASE_URL = f"https://raw.githubusercontent.com/csi-addons/kubernetes-csi-addons/{VERSION}/deploy/controller" From c98cd121103c40451988b845556eee1bbb3a286d Mon Sep 17 00:00:00 2001 From: Abhijeet Shakya Date: Mon, 1 Apr 2024 20:28:52 +0530 Subject: [PATCH 10/10] Updating docs to give install instructions for virtctl v1.2.0 Signed-off-by: Abhijeet Shakya --- docs/user-quick-start.md | 17 ++++++----------- test/README.md | 12 +++++++++--- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/user-quick-start.md b/docs/user-quick-start.md index e6216c318..8f39a3ffa 100644 --- a/docs/user-quick-start.md +++ b/docs/user-quick-start.md @@ -149,22 +149,17 @@ enough resources: for the details. Tested with version v1.12.0. -1. Install the `virtctl` tool. See - [virtctl install](https://kubevirt.io/quickstart_minikube/#virtctl) - for the details. - Tested with version v1.0.1. +1. Install the `virtctl` tool. ``` - curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.0.1/virtctl-v1.0.1-linux-amd64 - ``` - - After download completes for `virtctl` issue these commands. - - ``` - chmod +x virtctl + curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 sudo install virtctl /usr/local/bin + rm virtctl ``` + For more info see + [virtctl install](https://kubevirt.io/quickstart_minikube/#virtctl) + 1. Install `mc` tool ``` diff --git a/test/README.md b/test/README.md index 473433d0e..cb5cf95b8 100644 --- a/test/README.md +++ b/test/README.md @@ -58,10 +58,16 @@ environment. 
See [Installing Helm](https://helm.sh/docs/intro/install/) for other options. Tested with version v3.11. -1. Install the `virtctl` tool. See +1. Install the `virtctl` tool + + ``` + curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 + sudo install virtctl /usr/local/bin + rm virtctl + ``` + + For more info see [virtctl install](https://kubevirt.io/quickstart_minikube/#virtctl) - for the details. - Tested with version v1.0.1. 1. Install `mc` tool