From 664cf38345ef286ae5f4c3f7407a109fc33d0dbe Mon Sep 17 00:00:00 2001
From: Konstantin Yarovoy
Date: Mon, 15 Jul 2024 15:00:09 +0000
Subject: [PATCH] airgap: Remove airgap mode
Remove all code connected to airgap mode and module,
"offline" parameter, "input_file" and "output_file" arguments.
Related spec tests and documentation are also removed.
Refs: #2104
Signed-off-by: Konstantin Yarovoy
---
.gitignore | 7 -
AIRGAP.md | 57 ----
INSTALL.md | 35 --
shard.lock | 4 -
shard.yml | 2 -
spec/airgap_task_spec.cr | 8 -
spec/prereqs_spec.cr | 9 -
spec/setup_spec.cr | 1 -
spec/utils/k8s_instrumentation_spec.cr | 1 -
src/tasks/airgap_task.cr | 34 --
src/tasks/chaos_mesh_setup.cr | 22 +-
src/tasks/cluster_setup.cr | 1 -
src/tasks/cnf_setup.cr | 37 +--
src/tasks/constants.cr | 1 -
src/tasks/kind_setup.cr | 39 +--
src/tasks/kubescape_setup.cr | 24 +-
src/tasks/litmus_setup.cr | 43 +--
src/tasks/opa_setup.cr | 30 +-
src/tasks/platform/observability.cr | 12 -
src/tasks/platform/security.cr | 1 -
src/tasks/prereqs.cr | 19 +-
src/tasks/setup.cr | 15 +-
src/tasks/sonobuoy_setup.cr | 38 +--
src/tasks/utils/apisnoop.cr | 2 +-
src/tasks/utils/cnf_manager.cr | 159 ++--------
src/tasks/utils/cnf_manager_airgap.cr | 187 -----------
src/tasks/utils/config.cr | 15 +-
src/tasks/utils/generate_config.cr | 17 +-
src/tasks/utils/image_prepull.cr | 36 ---
src/tasks/workload/compatibility.cr | 86 ++---
src/tasks/workload/configuration.cr | 14 +-
src/tasks/workload/observability.cr | 5 -
src/tasks/workload/reliability.cr | 212 +++++--------
src/tasks/workload/security.cr | 20 --
src/tasks/workload/state.cr | 37 +--
utils/airgap/airgap.cr | 405 ------------------------
utils/airgap/cri-tools-template.yml.ecr | 40 ---
utils/airgap/shard.yml | 42 ---
utils/airgap/spec/airgap_spec.cr | 179 -----------
utils/airgap/spec/spec_helper.cr | 8 -
40 files changed, 224 insertions(+), 1680 deletions(-)
delete mode 100644 AIRGAP.md
delete mode 100644 spec/airgap_task_spec.cr
delete mode 100644 src/tasks/airgap_task.cr
delete mode 100644 src/tasks/utils/cnf_manager_airgap.cr
delete mode 100644 src/tasks/utils/image_prepull.cr
delete mode 100644 utils/airgap/airgap.cr
delete mode 100644 utils/airgap/cri-tools-template.yml.ecr
delete mode 100644 utils/airgap/shard.yml
delete mode 100644 utils/airgap/spec/airgap_spec.cr
delete mode 100644 utils/airgap/spec/spec_helper.cr
diff --git a/.gitignore b/.gitignore
index a52ea7611..6e00b9f67 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,7 +13,6 @@
/tools/helm
/tools/sonobuoy
/tools/cluster-api
-/tools/airgapped_kind/kind_node/node.tar.gz
/tools/dockerd-manifest.yml
admin.conf
cnf-testsuite
@@ -25,7 +24,6 @@ cnf-testsuite*.tar*
*.tar
reasonable_startup_orig.yml
reasonable_startup_test.yml
-cri_tools.yml
ephemeral_env
kubeconfig.conf
linux-amd64/
@@ -41,13 +39,8 @@ chaos_container_kill.yml
/.idea/
*.log
points.yml
-/tools/cri-tools-manifest.yml
-cri-tools-manifest.yml
containerd-*-linux-amd64.tar.gz
-crictl-*-linux-amd64.tar.gz
tools/LICENSE
-airgapped.tar.gz
-airgaptest.tar.gz
/cnf-testsuite.yml
/*.tgz
/enforce-image-tag.yml
diff --git a/AIRGAP.md b/AIRGAP.md
deleted file mode 100644
index 2ba1d84d7..000000000
--- a/AIRGAP.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# Air-gap documentation
-
-#### Overview of the air-gap installation steps
-
-The following install instructions will create a tarball of all the necessary cnf-testsuite components which will need to be copied to the airgapped environment. You can then run the `setup` command to bootstrap the K8s cluster and install the cnf-testsuite components onto your K8s cluster. Optionally you can use the cnf_setup command to create or add to an existing tarball created with the required components for your own CNF.
-
-#### Quick install steps for bootstrapping an air-gapped environment
-
-1. The first step requires internet access, which creates the initial tarball of the required components for cnf-testsuite:
-```
-./cnf-testsuite airgapped output-file=/tmp/airgapped.tar.gz
-```
-2. The next step after the airgapped.tar.gz is copied to your air-gapped environment host that has kubectl access will setup cnf-testsuite (offline without internet):
-```
-./cnf-testsuite setup offline=/tmp/airgapped.tar.gz
-
-# To run the set suite in air-gapped mode
-./cnf-testsuite workload offline=true
-```
-
-#### Quick install steps for CNF install (optional)
-
-1. This requires internet access, which pulls down necessary components the CNF requires. This also assumes your airgapped k8s has already been bootstrapped (previous quick install steps):
-
-`./cnf-testsuite cnf_setup cnf-config=example-cnfs/coredns/cnf-testsuite.yml output-file=/tmp/cnf.tar.gz`
-
-2. In the air-gapped environment (offline without internet access) after copying the tarball, the following command will setup the CNF:
-
-`./cnf-testsuite cnf_setup cnf-config=example-cnfs/coredns/cnf-testsuite.yml input-file=/tmp/cnf.tar.gz`
-
-#### Detailed explanation of the air-gap process
-
-**Step 1:** The air-gap process starts out downloading the prerequisites for bootstrapping the airgapped cluster, the upstream testing tools, and CNFs into a tarball. It does this by:
-* Tarballing any cnf-testsuite internal tools
-* Tarballing the upstream projects' docker images
-
-When installing the upstream projects and/or the cnfs, there are three styles of installation into K8s that have to be managed:
-
-* **K8s Installation methods**
- * **Helm charts** must be downloaded into a tarball so that they can be executed without accessing a remote helm repository.
- * The air-gap process needs to inspect the helm chart tarball and then extract the referenced docker images into docker image tarballs.
- * **Helm directories**
- * The air-gap process needs to inspect the helm chart directory yaml files and extract the referenced docker images into docker image tarballs.
- * **Manifest directories**
- * As in the helm directory process, a manifest directory must have all of the docker images that are referenced in its yaml files tarballed into valid docker image tarballs.
-
-**Step 2:** The tarball that was created in step 1 needs to be **copied to the air-gapped environment**. The cnf-testsuite executable will need to be copied into the air-gapped environment as well as the cnf-testsuite.yml config files, and any other files needed for the managing a specific CNF.
-
-**Step 3:** **The bootstrapping process** installs the cnf-testsuite bootstrapping tools on each schedulable node. It does this by first finding an image that already exists on each node and then creates a DaemonSet named "cri-tools" using the found image. It then copies the cri and ctr binaries into the DaemonSet pods.
-
-**Step 4:** **The image caching** process uses the cri-tools to cache the saved docker images onto each node. It achieves this by utilizing the kubectl cp command to copy the tarball onto all schedulable nodes and uses the docker client to load and then cache the images.
-
-**Step 5:** **The install tools** step installs all of the prerequisite tools (using helm, helm directories, or manifest files) that the cnf-testsuite requires for each node.
-
-**Step 6: (optional)** The install CNF (applications) step installs a CNF using the cnf_setup command combined with a user-provided cnf-testuite config file with a helm chart, helm directory, or manifest file.
-
-Note: In order for images to be deployed into an air-gapped enviroment, the images in the helm chart or manifest file need to be set to a specific version (otherwise the image pull policy will force a retrieval of the image which it will not be able to pull).
diff --git a/INSTALL.md b/INSTALL.md
index 62f2072be..2d2af333b 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -129,31 +129,6 @@ This should build a cnf-testsuite binary in the root directory of the git repo c
-#### Air-Gapped
-
-The CNF-TestSuite has the ability to install in an air-gapped environment. A tarball with upstream tools can be created from a source installation, or downloaded from the binaries of the release.
-
-You can read more about the air-gap process by reading the [AIRGAP detailed documentation](AIRGAP.md).
-
- Click here for brief air-gap install details
-
-
-Prerequite: Follow the source install instructions to create a working binary which will generate the air-gapped tarball.
-
-Follow these steps to create an air-gap tarball and to bootstrap the cluster with the tarball:
-
-```
-./cnf-testsuite airgapped output-file=./tmp/airgapped.tar.gz
-./cnf-testsuite setup offline=./tmp/airgapped.tar.gz
-
-# To run the set suite in air-gapped mode
-./cnf-testsuite workload offline=true
-```
-This should create a bootstrapped cluster with the upstream tools necessary for the cnf-testsuite.
-
-
-
-
### Preparation
Now that you have cnf-testsuite installed, we need to prepare the suite.
@@ -227,16 +202,6 @@ If you've followed the [CNF_TESTSUITE_YML_USAGE.md](CNF_TESTSUITE_YML_USAGE.md)
```
cnf-testsuite cnf_setup cnf-config=./cnf-testsuite.yml
```
-#### Installing a CNF in Airgapped mode
-
-To create a tarball of a cnf that can be copied into the airgapped environment:
-```
-cnf-testsuite cnf_setup cnf-config=./cnf-testsuite.yml output-file=/tmp/mycnf.tar
-```
-To install a cnf from a tarball into the airgapped environment:
-```
-cnf-testsuite cnf_setup cnf-config=./cnf-testsuite.yml input-file=/tmp/mycnf.tar
-```
### Running cnf-testsuite for the first time
diff --git a/shard.lock b/shard.lock
index ab2f1dff1..3a2791320 100644
--- a/shard.lock
+++ b/shard.lock
@@ -1,9 +1,5 @@
version: 2.0
shards:
- airgap:
- path: utils/airgap
- version: 0.1.0
-
ameba:
git: https://github.com/crystal-ameba/ameba.git
version: 1.3.1
diff --git a/shard.yml b/shard.yml
index 47e5f7b12..4afa99615 100644
--- a/shard.yml
+++ b/shard.yml
@@ -68,8 +68,6 @@ dependencies:
release_manager:
github: cnf-testsuite/release_manager
branch: main
- airgap:
- path: utils/airgap
retriable:
github: Sija/retriable.cr
protobuf:
diff --git a/spec/airgap_task_spec.cr b/spec/airgap_task_spec.cr
deleted file mode 100644
index 5dce17212..000000000
--- a/spec/airgap_task_spec.cr
+++ /dev/null
@@ -1,8 +0,0 @@
-require "./spec_helper"
-require "colorize"
-require "../src/tasks/utils/utils.cr"
-require "file_utils"
-require "sam"
-
-describe "AirGap" do
-end
diff --git a/spec/prereqs_spec.cr b/spec/prereqs_spec.cr
index e0eb1ed86..a9c6a37a0 100644
--- a/spec/prereqs_spec.cr
+++ b/spec/prereqs_spec.cr
@@ -14,13 +14,4 @@ describe "Prereq" do
(/kubectl found/ =~ result[:output]).should_not be_nil
(/git found/ =~ result[:output]).should_not be_nil
end
-
- it "'prereq' with offline option should check the system for prerequisites except git", tags: ["points"] do
- result = ShellCmd.run_testsuite("prereqs verbose offline=1")
- result[:status].success?.should be_true
- (/helm found/ =~ result[:output]).should_not be_nil
-
- (/kubectl found/ =~ result[:output]).should_not be_nil
- (/git found/ =~ result[:output]).should be_nil
- end
end
diff --git a/spec/setup_spec.cr b/spec/setup_spec.cr
index 5b1940790..abfcd200a 100644
--- a/spec/setup_spec.cr
+++ b/spec/setup_spec.cr
@@ -1,7 +1,6 @@
require "./spec_helper"
require "colorize"
require "../src/tasks/utils/utils.cr"
-require "airgap"
require "kubectl_client"
require "helm"
require "file_utils"
diff --git a/spec/utils/k8s_instrumentation_spec.cr b/spec/utils/k8s_instrumentation_spec.cr
index 4557a79d8..3a4cda09e 100644
--- a/spec/utils/k8s_instrumentation_spec.cr
+++ b/spec/utils/k8s_instrumentation_spec.cr
@@ -1,5 +1,4 @@
require "../spec_helper"
-require "airgap"
require "kubectl_client"
require "../../src/tasks/utils/k8s_instrumentation.cr"
require "file_utils"
diff --git a/src/tasks/airgap_task.cr b/src/tasks/airgap_task.cr
deleted file mode 100644
index cb91abc2b..000000000
--- a/src/tasks/airgap_task.cr
+++ /dev/null
@@ -1,34 +0,0 @@
-require "sam"
-require "file_utils"
-require "colorize"
-require "totem"
-
-desc "Sets up an airgapped tarball"
-task "airgapped", do |t, args|
- t.invoke("install_kubescape")
- #./cnf-testsuite setup --offline=./airgapped.tar.gz
- #./cnf-testsuite airgapped -o ~/airgapped.tar.gz
- #./cnf-testsuite offline of=~/airgapped.tar.gz
- #./cnf-testsuite offline output-file=~/mydir/airgapped.tar.gz
- output_file = args.named["output-file"].as(String) if args.named["output-file"]?
- output_file = args.named["of"].as(String) if args.named["of"]?
- if output_file && !output_file.empty?
- # todo check if file exists
- CNFManager::CNFAirGap.generate(output_file)
- else
- CNFManager::CNFAirGap.generate()
- end
- stdout_success "Airgap setup complete"
-end
-
-desc "Extracts an airgapped tarball"
-task "extract", do |_, args|
- input_file = args.named["input-file"].as(String) if args.named["input-file"]?
- input_file = args.named["if"].as(String) if args.named["of"]?
- if input_file && !input_file.empty?
- AirGap.extract(input_file)
- else
- AirGap.extract()
- end
-end
-
diff --git a/src/tasks/chaos_mesh_setup.cr b/src/tasks/chaos_mesh_setup.cr
index b5de01180..a91e38dab 100644
--- a/src/tasks/chaos_mesh_setup.cr
+++ b/src/tasks/chaos_mesh_setup.cr
@@ -3,30 +3,20 @@ require "file_utils"
require "colorize"
require "totem"
require "./utils/utils.cr"
-# require "./utils/tar.cr"
require "tar"
CHAOS_MESH_VERSION = "v0.8.0"
-CHAOS_MESH_OFFLINE_DIR = "#{TarClient::TAR_REPOSITORY_DIR}/chaos-mesh_chaos-mesh"
desc "Install Chaos Mesh"
task "install_chaosmesh" do |_, args|
Log.for("verbose").info { "install_chaosmesh" } if check_verbose(args)
current_dir = FileUtils.pwd
- helm = Helm::BinarySingleton.helm
- # KubectlClient::Apply.file("https://raw.githubusercontent.com/chaos-mesh/chaos-mesh/#{CHAOS_MESH_VERSION}/manifests/crd.yaml")
-
- if args.named["offline"]?
- Log.info { "install chaos mesh offline mode" }
- helm_chart = Dir.entries(CHAOS_MESH_OFFLINE_DIR).first
- Helm.install("my-chaos-mesh #{CHAOS_MESH_OFFLINE_DIR}/#{helm_chart} --version 0.5.1")
-
- else
- # `helm repo add chaos-mesh https://charts.chaos-mesh.org`
- # `helm install my-chaos-mesh chaos-mesh/chaos-mesh --version 0.5.1`
- Helm.helm_repo_add("chaos-mesh","https://charts.chaos-mesh.org")
- Helm.install("my-chaos-mesh chaos-mesh/chaos-mesh --version 0.5.1")
- end
+ helm = Helm::BinarySingleton.helm
+ # KubectlClient::Apply.file("https://raw.githubusercontent.com/chaos-mesh/chaos-mesh/#{CHAOS_MESH_VERSION}/manifests/crd.yaml")
+ # `helm repo add chaos-mesh https://charts.chaos-mesh.org`
+ # `helm install my-chaos-mesh chaos-mesh/chaos-mesh --version 0.5.1`
+ Helm.helm_repo_add("chaos-mesh","https://charts.chaos-mesh.org")
+ Helm.install("my-chaos-mesh chaos-mesh/chaos-mesh --version 0.5.1")
File.write("chaos_network_loss.yml", CHAOS_NETWORK_LOSS)
File.write("chaos_cpu_hog.yml", CHAOS_CPU_HOG)
diff --git a/src/tasks/cluster_setup.cr b/src/tasks/cluster_setup.cr
index d2877d48f..730708458 100644
--- a/src/tasks/cluster_setup.cr
+++ b/src/tasks/cluster_setup.cr
@@ -6,7 +6,6 @@ require "totem"
require "./utils/utils.cr"
# CHAOS_MESH_VERSION = "v0.8.0"
-# CHAOS_MESH_OFFLINE_DIR = "#{TarClient::TAR_REPOSITORY_DIR}/chaos-mesh_chaos-mesh"
desc "Install CNF Test Suite Cluster Tools"
task "install_cluster_tools" do |_, args|
diff --git a/src/tasks/cnf_setup.cr b/src/tasks/cnf_setup.cr
index ac79f2897..e9cd5da04 100644
--- a/src/tasks/cnf_setup.cr
+++ b/src/tasks/cnf_setup.cr
@@ -8,39 +8,15 @@ task "cnf_setup", ["helm_local_install", "create_namespace"] do |_, args|
Log.for("verbose").info { "cnf_setup" } if check_verbose(args)
Log.for("verbose").debug { "args = #{args.inspect}" } if check_verbose(args)
cli_hash = CNFManager.sample_setup_cli_args(args)
- output_file = cli_hash[:output_file]
- input_file = cli_hash[:input_file]
config_file = cli_hash[:config_file]
- if output_file && !output_file.empty?
- puts "cnf tarball generation mode".colorize(:green)
- tar_info = CNFManager::CNFAirGap.generate_cnf_setup(cli_hash[:config_file], output_file, cli_hash)
- puts "cnf tarball generation mode complete".colorize(:green)
- elsif input_file && !input_file.empty?
- puts "cnf setup airgapped mode".colorize(:green)
- AirGap.extract(input_file)
- puts "cnf setup caching images on nodes (airgapped mode)".colorize(:green)
- install_method = CNFManager::Config.install_method_by_config_file(config_file)
- config_src = CNFManager::Config.config_src_by_config_file(config_file)
- release_name = CNFManager::Config.release_name_by_config_file(config_file)
- if config_file && !AirGap.image_pull_policy_config_file?(install_method, config_src, release_name)
- puts "Some containers within the installation manifests do not have an image pull policy defined. Airgap mode will not work until this is fixed.".colorize(:red)
- exit 1
- end
- AirGap.cache_images(cnf_setup: true)
- puts "cnf setup finished caching images on nodes (airgapped mode)".colorize(:green)
- CNFManager.sample_setup(cli_hash)
- puts "cnf setup airgapped mode complete".colorize(:green)
+ if ClusterTools.install
+ puts "ClusterTools installed".colorize(:green)
else
- Log.info { "Installing ClusterTools"}
- if ClusterTools.install
- puts "ClusterTools installed".colorize(:green)
- else
- puts "The ClusterTools installation timed out. Please check the status of the cluster-tools pods.".colorize(:red)
- end
- puts "cnf setup online mode".colorize(:green)
- CNFManager.sample_setup(cli_hash)
- puts "cnf setup online mode complete".colorize(:green)
+ puts "The ClusterTools installation timed out. Please check the status of the cluster-tools pods.".colorize(:red)
end
+ puts "cnf setup start".colorize(:green)
+ CNFManager.sample_setup(cli_hash)
+ puts "cnf setup complete".colorize(:green)
end
task "cnf_cleanup" do |_, args|
@@ -88,7 +64,6 @@ task "generate_config" do |_, args|
config_src = args.named["config-src"].as(String)
output_file = args.named["output-file"].as(String) if args.named["output-file"]?
output_file = args.named["of"].as(String) if args.named["of"]?
- #TODO make this work in airgapped mode
if output_file && !output_file.empty?
Log.info { "generating config with an output file" }
CNFManager::GenerateConfig.generate_config(config_src, output_file)
diff --git a/src/tasks/constants.cr b/src/tasks/constants.cr
index c7c042947..5c8788c92 100644
--- a/src/tasks/constants.cr
+++ b/src/tasks/constants.cr
@@ -4,7 +4,6 @@ ESSENTIAL_PASSED_THRESHOLD = 15
CNF_DIR = "cnfs"
CONFIG_FILE = "cnf-testsuite.yml"
BASE_CONFIG = "./config.yml"
-OFFLINE_MANIFESTS_PATH = "/tmp/manifests"
PASSED = "passed"
FAILED = "failed"
SKIPPED = "skipped"
diff --git a/src/tasks/kind_setup.cr b/src/tasks/kind_setup.cr
index 68ad3aa19..3048c28f5 100644
--- a/src/tasks/kind_setup.cr
+++ b/src/tasks/kind_setup.cr
@@ -13,29 +13,18 @@ task "install_kind" do |_, args|
FileUtils.mkdir_p("#{tools_path}/kind")
write_file = "#{tools_path}/kind/kind"
Log.info { "write_file: #{write_file}" }
- if args.named["offline"]?
- Log.info { "install kind offline mode" }
- FileUtils.cp("#{TarClient::TAR_DOWNLOAD_DIR}/kind", "#{write_file}")
+ Log.info { "install kind" }
+ url = "https://github.com/kubernetes-sigs/kind/releases/download/v#{KIND_VERSION}/kind-linux-amd64"
+ Log.info { "url: #{url}" }
+ do_this_on_each_retry = ->(ex : Exception, attempt : Int32, elapsed_time : Time::Span, next_interval : Time::Span) do
+ Log.info { "#{ex.class}: '#{ex.message}' - #{attempt} attempt in #{elapsed_time} seconds and #{next_interval} seconds until the next try."}
+ end
+ Retriable.retry(on_retry: do_this_on_each_retry, times: 3, base_interval: 1.second) do
+ HttpHelper.download("#{url}","#{write_file}")
stderr = IO::Memory.new
status = Process.run("chmod +x #{write_file}", shell: true, output: stderr, error: stderr)
success = status.success?
raise "Unable to make #{write_file} executable" if success == false
- else
- Log.info { "install kind online mode" }
- url = "https://github.com/kubernetes-sigs/kind/releases/download/v#{KIND_VERSION}/kind-linux-amd64"
- Log.info { "url: #{url}" }
- do_this_on_each_retry = ->(ex : Exception, attempt : Int32, elapsed_time : Time::Span, next_interval : Time::Span) do
- Log.info { "#{ex.class}: '#{ex.message}' - #{attempt} attempt in #{elapsed_time} seconds and #{next_interval} seconds until the next try."}
- end
- Retriable.retry(on_retry: do_this_on_each_retry, times: 3, base_interval: 1.second) do
-
- HttpHelper.download("#{url}","#{write_file}")
-
- stderr = IO::Memory.new
- status = Process.run("chmod +x #{write_file}", shell: true, output: stderr, error: stderr)
- success = status.success?
- raise "Unable to make #{write_file} executable" if success == false
- end
end
end
end
@@ -68,7 +57,7 @@ class KindManager
#totod make a create cluster with flannel
- def create_cluster(name : String, kind_config : String?, offline : Bool, k8s_version = "1.21.1") : KindManager::Cluster?
+ def create_cluster(name : String, kind_config : String?, k8s_version = "1.21.1") : KindManager::Cluster?
Log.info { "Creating Kind Cluster" }
kubeconfig = "#{tools_path}/kind/#{name}_admin.conf"
Log.for("kind_kubeconfig").info { kubeconfig }
@@ -83,11 +72,7 @@ class KindManager
# * Add --verbosity 100 to debug kind issues.
# * Use --retain to retain cluster incase there is an error with creation.
cmd = "#{kind} create cluster --name #{name} #{kind_config_opt} --image kindest/node:v#{k8s_version} --kubeconfig #{kubeconfig}"
- if offline
- ShellCmd.run(cmd, "KindManager#create_cluster(offline)")
- else
- ShellCmd.run(cmd, "KindManager#create_cluster(online)")
- end
+ ShellCmd.run(cmd, "KindManager#create_cluster")
end
return KindManager::Cluster.new(name, kubeconfig)
@@ -112,9 +97,9 @@ class KindManager
kind_config
end
- def self.create_cluster_with_chart_and_wait(name, kind_config, chart_opts, offline) : KindManager::Cluster
+ def self.create_cluster_with_chart_and_wait(name, kind_config, chart_opts) : KindManager::Cluster
manager = KindManager.new
- cluster = manager.create_cluster(name, kind_config, offline)
+ cluster = manager.create_cluster(name, kind_config)
Helm.install("#{name}-plugin #{chart_opts} --namespace kube-system --kubeconfig #{cluster.kubeconfig}")
cluster.wait_until_pods_ready()
cluster
diff --git a/src/tasks/kubescape_setup.cr b/src/tasks/kubescape_setup.cr
index 4d1a29193..c18781555 100644
--- a/src/tasks/kubescape_setup.cr
+++ b/src/tasks/kubescape_setup.cr
@@ -16,30 +16,18 @@ task "install_kubescape", ["kubescape_framework_download"] do |_, args|
if !File.exists?("#{tools_path}/kubescape/kubescape") || installed_kubescape_version != KUBESCAPE_VERSION
write_file = "#{tools_path}/kubescape/kubescape"
Log.info { "write_file: #{write_file}" }
- if args.named["offline"]?
- Log.info { "kubescape install offline mode" }
- `cp #{TarClient::TAR_DOWNLOAD_DIR}/kubescape-ubuntu-latest #{write_file}`
- `cp #{TarClient::TAR_DOWNLOAD_DIR}/nsa.json #{tools_path}/kubescape/`
+ Log.info { "kubescape install" }
+ url = "https://github.com/armosec/kubescape/releases/download/v#{KUBESCAPE_VERSION}/kubescape-ubuntu-latest"
+ Log.info { "url: #{url}" }
+ Retriable.retry do
+ HttpHelper.download("#{url}","#{write_file}")
stderr = IO::Memory.new
status = Process.run("chmod +x #{write_file}", shell: true, output: stderr, error: stderr)
success = status.success?
raise "Unable to make #{write_file} executable" if success == false
- else
- Log.info { "kubescape install online mode" }
- url = "https://github.com/armosec/kubescape/releases/download/v#{KUBESCAPE_VERSION}/kubescape-ubuntu-latest"
- Log.info { "url: #{url}" }
- Retriable.retry do
-
- HttpHelper.download("#{url}","#{write_file}")
-
- stderr = IO::Memory.new
- status = Process.run("chmod +x #{write_file}", shell: true, output: stderr, error: stderr)
- success = status.success?
- raise "Unable to make #{write_file} executable" if success == false
- end
end
- # Irrespective of online or offline mode, write the version file with the kubescape version
+ # Write the version file with the kubescape version
File.write(version_file, KUBESCAPE_VERSION)
end
end
diff --git a/src/tasks/litmus_setup.cr b/src/tasks/litmus_setup.cr
index 4774910c0..9ad4107d6 100644
--- a/src/tasks/litmus_setup.cr
+++ b/src/tasks/litmus_setup.cr
@@ -7,21 +7,14 @@ require "./utils/utils.cr"
desc "Install LitmusChaos"
task "install_litmus" do |_, args|
- if args.named["offline"]?
- Log.info {"install litmus offline mode"}
- AirGap.image_pull_policy(LitmusManager::OFFLINE_LITMUS_OPERATOR)
- KubectlClient::Apply.file(LitmusManager::OFFLINE_LITMUS_OPERATOR)
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/chaos_crds.yaml")
- else
- #todo in resilience node_drain task
- #todo get node name
- #todo download litmus file then modify it with add_node_selector
- #todo apply modified litmus file
- Log.info { "install litmus online mode" }
- Log.info { "install litmus operator"}
- KubectlClient::Apply.namespace(LitmusManager::LITMUS_NAMESPACE)
- KubectlClient::Apply.file(LitmusManager::ONLINE_LITMUS_OPERATOR)
- end
+ #todo in resilience node_drain task
+ #todo get node name
+ #todo download litmus file then modify it with add_node_selector
+ #todo apply modified litmus file
+ Log.info { "install litmus" }
+ KubectlClient::Apply.namespace(LitmusManager::LITMUS_NAMESPACE)
+ Log.info { "install litmus operator"}
+ KubectlClient::Apply.file(LitmusManager::LITMUS_OPERATOR)
end
desc "Uninstall LitmusChaos"
@@ -33,12 +26,7 @@ task "uninstall_litmus" do |_, args|
output: stdout = IO::Memory.new,
error: stderr = IO::Memory.new
)
- if args.named["offline"]?
- Log.info { "install litmus offline mode" }
- KubectlClient::Delete.file("#{OFFLINE_MANIFESTS_PATH}/litmus-operator-v#{LitmusManager::Version}.yaml")
- else
- KubectlClient::Delete.file("https://litmuschaos.github.io/litmus/litmus-operator-v#{LitmusManager::Version}.yaml")
- end
+ KubectlClient::Delete.file("https://litmuschaos.github.io/litmus/litmus-operator-v#{LitmusManager::Version}.yaml")
Log.info { "#{stdout}" if check_verbose(args) }
Log.info { "#{stderr}" if check_verbose(args) }
end
@@ -50,10 +38,8 @@ module LitmusManager
# Version = "1.13.8"
# Version = "3.0.0-beta12"
NODE_LABEL = "kubernetes.io/hostname"
- OFFLINE_LITMUS_OPERATOR = "#{OFFLINE_MANIFESTS_PATH}/litmus-operator-v#{LitmusManager::Version}.yaml"
#https://raw.githubusercontent.com/litmuschaos/chaos-operator/v2.14.x/deploy/operator.yaml
- # ONLINE_LITMUS_OPERATOR = "https://litmuschaos.github.io/litmus/litmus-operator-v#{LitmusManager::Version}.yaml"
- ONLINE_LITMUS_OPERATOR = "https://litmuschaos.github.io/litmus/litmus-operator-v#{LitmusManager::Version}.yaml"
+ LITMUS_OPERATOR = "https://litmuschaos.github.io/litmus/litmus-operator-v#{LitmusManager::Version}.yaml"
# for node drain
DOWNLOADED_LITMUS_FILE = "litmus-operator-downloaded.yaml"
MODIFIED_LITMUS_FILE = "litmus-operator-modified.yaml"
@@ -62,13 +48,8 @@ module LitmusManager
- def self.add_node_selector(node_name, airgap=false )
- if airgap
- file = File.read(OFFLINE_LITMUS_OPERATOR)
-
- else
- file = File.read(DOWNLOADED_LITMUS_FILE)
- end
+ def self.add_node_selector(node_name)
+ file = File.read(DOWNLOADED_LITMUS_FILE)
deploy_index = file.index("kind: Deployment") || 0
spec_literal = "spec:"
template = "\n nodeSelector:\n kubernetes.io/hostname: #{node_name}"
diff --git a/src/tasks/opa_setup.cr b/src/tasks/opa_setup.cr
index 69fe1af3c..94d129720 100644
--- a/src/tasks/opa_setup.cr
+++ b/src/tasks/opa_setup.cr
@@ -4,9 +4,6 @@ require "colorize"
require "totem"
require "./utils/utils.cr"
-
-OPA_OFFLINE_DIR = "#{TarClient::TAR_REPOSITORY_DIR}/gatekeeper_gatekeeper"
-
desc "Sets up OPA in the K8s Cluster"
task "install_opa", ["helm_local_install", "create_namespace"] do |_, args|
helm_install_args_list = [
@@ -22,23 +19,18 @@ task "install_opa", ["helm_local_install", "create_namespace"] do |_, args|
helm_install_args = helm_install_args_list.join(" ")
- if args.named["offline"]?
- LOGGING.info "Intalling OPA Gatekeeper in Offline Mode"
- chart = Dir.entries(OPA_OFFLINE_DIR).first
- Helm.install("#{helm_install_args} opa-gatekeeper #{OPA_OFFLINE_DIR}/#{chart}")
- else
- Helm.helm_repo_add("gatekeeper", "https://open-policy-agent.github.io/gatekeeper/charts")
- begin
- Helm.install("#{helm_install_args} opa-gatekeeper gatekeeper/gatekeeper")
- rescue e : Helm::CannotReuseReleaseNameError
- stdout_warning "gatekeeper already installed"
- end
+ Helm.helm_repo_add("gatekeeper", "https://open-policy-agent.github.io/gatekeeper/charts")
+ begin
+ Helm.install("#{helm_install_args} opa-gatekeeper gatekeeper/gatekeeper")
+ rescue e : Helm::CannotReuseReleaseNameError
+ stdout_warning "gatekeeper already installed"
end
- File.write("enforce-image-tag.yml", ENFORCE_IMAGE_TAG)
- File.write("constraint_template.yml", CONSTRAINT_TEMPLATE)
- KubectlClient::Apply.file("constraint_template.yml")
- KubectlClient.wait("--for condition=established --timeout=300s crd/requiretags.constraints.gatekeeper.sh")
- KubectlClient::Apply.file("enforce-image-tag.yml")
+
+ File.write("enforce-image-tag.yml", ENFORCE_IMAGE_TAG)
+ File.write("constraint_template.yml", CONSTRAINT_TEMPLATE)
+ KubectlClient::Apply.file("constraint_template.yml")
+ KubectlClient.wait("--for condition=established --timeout=300s crd/requiretags.constraints.gatekeeper.sh")
+ KubectlClient::Apply.file("enforce-image-tag.yml")
end
desc "Uninstall OPA"
diff --git a/src/tasks/platform/observability.cr b/src/tasks/platform/observability.cr
index 416ea8bb6..22b74ccc8 100644
--- a/src/tasks/platform/observability.cr
+++ b/src/tasks/platform/observability.cr
@@ -20,9 +20,6 @@ namespace "platform" do
unless check_poc(args)
next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Kube State Metrics not in poc mode")
end
- if args.named["offline"]?
- next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Kube State Metrics in offline mode")
- end
Log.info { "Running POC: kube_state_metrics" }
found = KernelIntrospection::K8s.find_first_process(CloudNativeIntrospection::STATE_METRICS_PROCESS)
Log.info { "Found Pod: #{found}" }
@@ -41,9 +38,6 @@ namespace "platform" do
unless check_poc(args)
next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "node exporter not in poc mode")
end
- if args.named["offline"]?
- next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "node exporter in offline mode")
- end
Log.info { "Running POC: node_exporter" }
found = KernelIntrospection::K8s.find_first_process(CloudNativeIntrospection::NODE_EXPORTER)
Log.info { "Found Process: #{found}" }
@@ -62,9 +56,6 @@ namespace "platform" do
unless check_poc(args)
next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "prometheus adapter not in poc mode")
end
- if args.named["offline"]?
- next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "prometheus adapter in offline mode")
- end
Log.info { "Running POC: prometheus_adapter" }
found = KernelIntrospection::K8s.find_first_process(CloudNativeIntrospection::PROMETHEUS_ADAPTER)
Log.info { "Found Process: #{found}" }
@@ -83,9 +74,6 @@ namespace "platform" do
unless check_poc(args)
next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Metrics server not in poc mode")
end
- if args.named["offline"]?
- next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "Metrics server in offline mode")
- end
Log.info { "Running POC: metrics_server" }
found = KernelIntrospection::K8s.find_first_process(CloudNativeIntrospection::METRICS_SERVER)
if found
diff --git a/src/tasks/platform/security.cr b/src/tasks/platform/security.cr
index 61a40e5ed..9fbcd7243 100644
--- a/src/tasks/platform/security.cr
+++ b/src/tasks/platform/security.cr
@@ -29,7 +29,6 @@ namespace "platform" do
desc "Attackers who have Cluster-admin permissions (can perform any action on any resource), can take advantage of their high privileges for malicious intentions. Determines which subjects have cluster admin permissions."
task "cluster_admin", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
CNFManager::Task.task_runner(args, task: t, check_cnf_installed: false) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Administrative Roles")
diff --git a/src/tasks/prereqs.cr b/src/tasks/prereqs.cr
index 675280002..e1e35bcec 100644
--- a/src/tasks/prereqs.cr
+++ b/src/tasks/prereqs.cr
@@ -8,25 +8,18 @@ require "helm"
require "./utils/system_information/clusterctl.cr"
task "prereqs" do |_, args|
- offline_mode = false
- offline_mode = true if args.named["offline"]?
-
verbose = check_verbose(args)
- helm_condition = Helm::SystemInfo.helm_installation_info(verbose) && !Helm.helm_gives_k8s_warning?(true)
-
- kubectl_existance = KubectlClient.installation_found?(verbose, offline_mode)
+ helm_ok = Helm::SystemInfo.helm_installation_info(verbose) && !Helm.helm_gives_k8s_warning?(true)
+ kubectl_ok = KubectlClient.installation_found?(verbose)
+ git_ok = GitClient.installation_found?
checks = [
- helm_condition,
- kubectl_existance,
+ helm_ok,
+ kubectl_ok,
+ git_ok
]
- # git installation is optional for offline mode
- if !offline_mode
- checks << GitClient.installation_found?
- end
-
if checks.includes?(false)
stdout_failure "Setup failed. Some prerequisites are missing. Please install all of the prerequisites before continuing."
exit 1
diff --git a/src/tasks/setup.cr b/src/tasks/setup.cr
index 0598eb299..bf2b9801b 100644
--- a/src/tasks/setup.cr
+++ b/src/tasks/setup.cr
@@ -5,7 +5,7 @@ require "totem"
desc "Sets up the CNF test suite, the K8s cluster, and upstream projects"
-task "setup", ["version", "offline", "helm_local_install", "prereqs", "create_namespace", "configuration_file_setup", "install_apisnoop", "install_sonobuoy", "install_chart_testing", "cnf_testsuite_setup", "install_kind"] do |_, args|
+task "setup", ["version", "helm_local_install", "prereqs", "create_namespace", "configuration_file_setup", "install_apisnoop", "install_sonobuoy", "install_chart_testing", "cnf_testsuite_setup", "install_kind"] do |_, args|
stdout_success "Setup complete"
end
@@ -20,19 +20,6 @@ rescue e : KubectlClient::Create::AlreadyExistsError
stdout_success "#{TESTSUITE_NAMESPACE} namespace already exists on the Kubernetes cluster"
end
-task "offline" do |_, args|
- #./cnf-testsuite setup --offline=./airgapped.tar.gz
- #./cnf-testsuite setup --input-file=./airgapped.tar.gz
- #./cnf-testsuite setup --if=./airgapped.tar.gz
- input_file = args.named["offline"].as(String) if args.named["offline"]?
- input_file = args.named["input-file"].as(String) if args.named["input-file"]?
- input_file = args.named["if"].as(String) if args.named["if"]?
- if input_file && !input_file.empty?
- AirGap.extract(input_file)
- AirGap.cache_images()
- end
-end
-
task "configuration_file_setup" do |_, args|
Log.for("verbose").info { "configuration_file_setup" } if check_verbose(args)
CNFManager::Points.create_points_yml
diff --git a/src/tasks/sonobuoy_setup.cr b/src/tasks/sonobuoy_setup.cr
index f8af12b71..f2dc525f5 100644
--- a/src/tasks/sonobuoy_setup.cr
+++ b/src/tasks/sonobuoy_setup.cr
@@ -29,30 +29,20 @@ task "install_sonobuoy" do |_, args|
FileUtils.mkdir_p("#{tools_path}/sonobuoy")
# curl = `VERSION="#{LITMUS_K8S_VERSION}" OS=linux ; curl -L "https://github.com/vmware-tanzu/sonobuoy/releases/download/v${VERSION}/sonobuoy_${VERSION}_${OS}_amd64.tar.gz" --output #{tools_path}/sonobuoy/sonobuoy.tar.gz`
# os="linux"
- if args.named["offline"]?
- Log.info { "install sonobuoy offline mode" }
- `tar -xzf #{TarClient::TAR_DOWNLOAD_DIR}/sonobuoy.tar.gz -C #{tools_path}/sonobuoy/ && \
- chmod +x #{tools_path}/sonobuoy/sonobuoy`
- sonobuoy = "#{tools_path}/sonobuoy/sonobuoy"
- sonobuoy_details(sonobuoy) if check_verbose(args)
- else
- url = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v#{SONOBUOY_K8S_VERSION}/sonobuoy_#{SONOBUOY_K8S_VERSION}_#{SONOBUOY_OS}_amd64.tar.gz"
- write_file = "#{tools_path}/sonobuoy/sonobuoy.tar.gz"
- Log.info { "url: #{url}" }
- Log.info { "write_file: #{write_file}" }
- # todo change this to work with rel
- # todo I think http get doesn't do follows and thats why we use halite here, that's sad. Shouldn't need to do a follow to download a file though?
- # i think any url can do a redirect ....
- # it could be that http.get 'just works' now. keyword just
-
-
- HttpHelper.download("#{url}","#{write_file}")
- `tar -xzf #{tools_path}/sonobuoy/sonobuoy.tar.gz -C #{tools_path}/sonobuoy/ && \
- chmod +x #{tools_path}/sonobuoy/sonobuoy && \
- rm #{tools_path}/sonobuoy/sonobuoy.tar.gz`
- sonobuoy = "#{tools_path}/sonobuoy/sonobuoy"
- sonobuoy_details(sonobuoy) if check_verbose(args)
- end
+ url = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v#{SONOBUOY_K8S_VERSION}/sonobuoy_#{SONOBUOY_K8S_VERSION}_#{SONOBUOY_OS}_amd64.tar.gz"
+ write_file = "#{tools_path}/sonobuoy/sonobuoy.tar.gz"
+ Log.info { "url: #{url}" }
+ Log.info { "write_file: #{write_file}" }
+ # todo change this to work with rel
+ # todo I think http get doesn't do follows and that's why we use halite here, which is unfortunate. Shouldn't need to do a follow to download a file though?
+ # i think any url can do a redirect ....
+ # it could be that http.get 'just works' now. keyword just
+ HttpHelper.download("#{url}","#{write_file}")
+ `tar -xzf #{tools_path}/sonobuoy/sonobuoy.tar.gz -C #{tools_path}/sonobuoy/ && \
+ chmod +x #{tools_path}/sonobuoy/sonobuoy && \
+ rm #{tools_path}/sonobuoy/sonobuoy.tar.gz`
+ sonobuoy = "#{tools_path}/sonobuoy/sonobuoy"
+ sonobuoy_details(sonobuoy) if check_verbose(args)
end
end
diff --git a/src/tasks/utils/apisnoop.cr b/src/tasks/utils/apisnoop.cr
index d946362e8..92903c30d 100644
--- a/src/tasks/utils/apisnoop.cr
+++ b/src/tasks/utils/apisnoop.cr
@@ -56,7 +56,7 @@ class ApiSnoop
Log.for("apisnoop_kind_dir").info { FileUtils.pwd }
ShellCmd.run("pwd", "apisnoop_setup_kind_dir", true)
kind_config = "kind+apisnoop.yaml"
- cluster = kind_manager.create_cluster(name, kind_config, false, k8s_version)
+ cluster = kind_manager.create_cluster(name, kind_config, k8s_version)
cluster.wait_until_nodes_ready()
cluster.wait_until_pods_ready()
return cluster
diff --git a/src/tasks/utils/cnf_manager.cr b/src/tasks/utils/cnf_manager.cr
index 5810a1426..aa301c95b 100644
--- a/src/tasks/utils/cnf_manager.cr
+++ b/src/tasks/utils/cnf_manager.cr
@@ -9,9 +9,7 @@ require "./points.cr"
require "./task.cr"
require "./config.cr"
require "./jaeger.cr"
-require "airgap"
require "tar"
-require "./image_prepull.cr"
require "./generate_config.cr"
require "./oran_monitor.cr"
require "log"
@@ -373,7 +371,7 @@ module CNFManager
end
end
- def self.install_method_by_config_src(config_src : String, airgapped=false, generate_tar_mode=false)
+ def self.install_method_by_config_src(config_src : String)
Log.info { "install_method_by_config_src" }
Log.info { "config_src: #{config_src}" }
helm_chart_file = "#{config_src}/#{Helm::CHART_YAML}"
@@ -385,7 +383,6 @@ module CNFManager
elsif File.exists?(helm_chart_file)
Log.info { "install_method_by_config_src helm_directory selected" }
Helm::InstallMethod::HelmDirectory
- # elsif generate_tar_mode && KubectlClient::Apply.validate(config_src) # just because we are in generate tar mode doesn't mean we have a K8s cluster
elsif Dir.exists?(config_src)
Log.info { "install_method_by_config_src manifest_directory selected" }
Helm::InstallMethod::ManifestDirectory
@@ -471,19 +468,12 @@ module CNFManager
end
#TODO move to helm module
- def self.helm_template_header(helm_chart_or_directory : String, template_file="/tmp/temp_template.yml", airgapped=false)
+ def self.helm_template_header(helm_chart_or_directory : String, template_file="/tmp/temp_template.yml")
Log.info { "helm_template_header" }
Log.info { "helm_template_header helm_chart_or_directory: #{helm_chart_or_directory}" }
helm = Helm::BinarySingleton.helm
# generate helm chart release name
# use --dry-run to generate yml file
- Log.info { "airgapped mode: #{airgapped}" }
- if airgapped
- # todo make tar info work with a directory
- info = AirGap.tar_info_by_config_src(helm_chart_or_directory)
- Log.info { "airgapped mode info: #{info}" }
- helm_chart_or_directory = info[:tar_name]
- end
Helm.install("--dry-run --generate-name #{helm_chart_or_directory} > #{template_file}")
raw_template = File.read(template_file)
Log.debug { "raw_template: #{raw_template}" }
@@ -495,22 +485,18 @@ module CNFManager
end
#TODO move to helm module
- def self.helm_chart_template_release_name(helm_chart_or_directory : String, template_file="/tmp/temp_template.yml", airgapped=false)
+ def self.helm_chart_template_release_name(helm_chart_or_directory : String, template_file="/tmp/temp_template.yml")
Log.info { "helm_chart_template_release_name" }
- Log.info { "airgapped mode: #{airgapped}" }
Log.info { "helm_chart_template_release_name helm_chart_or_directory: #{helm_chart_or_directory}" }
- hth = helm_template_header(helm_chart_or_directory, template_file, airgapped)
+ hth = helm_template_header(helm_chart_or_directory, template_file)
Log.info { "helm template (should not be a full path): #{hth}" }
hth["NAME"]
end
- def self.generate_and_set_release_name(config_yml_path, airgapped=false, generate_tar_mode=false, src_mode=false)
+ def self.generate_and_set_release_name(config_yml_path, src_mode=false)
Log.info { "generate_and_set_release_name" }
Log.info { "generate_and_set_release_name config_yml_path: #{config_yml_path}" }
- Log.info { "airgapped mode: #{airgapped}" }
- Log.info { "generate_tar_mode: #{generate_tar_mode}" }
- return if generate_tar_mode
yml_file = CNFManager.ensure_cnf_testsuite_yml_path(config_yml_path)
yml_path = CNFManager.ensure_cnf_testsuite_dir(config_yml_path)
@@ -528,17 +514,15 @@ module CNFManager
when Helm::InstallMethod::HelmChart
Log.info { "generate_and_set_release_name install method: #{install_method[0]} data: #{install_method[1]}" }
Log.info { "generate_and_set_release_name helm_chart_or_directory: #{install_method[1]}" }
- release_name = helm_chart_template_release_name(install_method[1], airgapped: airgapped)
+ release_name = helm_chart_template_release_name(install_method[1])
when Helm::InstallMethod::HelmDirectory
Log.info { "helm_directory install method: #{yml_path}/#{install_method[1]}" }
- # todo if in airgapped mode, use path for airgapped repositories
- # todo if in airgapped mode, get the release name
# todo get the release name by looking through everything under /tmp/repositories
Log.info { "generate_and_set_release_name helm_chart_or_directory: #{install_method[1]}" }
if src_mode
- release_name = helm_chart_template_release_name("#{src_helm_directory}", airgapped: airgapped)
+ release_name = helm_chart_template_release_name("#{src_helm_directory}")
else
- release_name = helm_chart_template_release_name("#{install_method[1]}", airgapped: airgapped)
+ release_name = helm_chart_template_release_name("#{install_method[1]}")
end
when Helm::InstallMethod::ManifestDirectory
Log.debug { "manifest_directory install method" }
@@ -624,16 +608,7 @@ module CNFManager
else
wait_count = 180
end
- output_file = args.named["airgapped"].as(String) if args.named["airgapped"]?
- output_file = args.named["output-file"].as(String) if args.named["output-file"]?
- output_file = args.named["of"].as(String) if args.named["if"]?
- input_file = args.named["offline"].as(String) if args.named["offline"]?
- input_file = args.named["input-file"].as(String) if args.named["input-file"]?
- input_file = args.named["if"].as(String) if args.named["if"]?
- airgapped=false
- airgapped=true if args.raw.includes?("airgapped")
-
- cli_args = {config_file: cnf_path, wait_count: wait_count, verbose: check_verbose(args), output_file: output_file, input_file: input_file}
+ cli_args = {config_file: cnf_path, wait_count: wait_count, verbose: check_verbose(args)}
Log.debug { "cli_args: #{cli_args}" }
cli_args
end
@@ -718,38 +693,27 @@ module CNFManager
#TODO don't think we need to make this here
FileUtils.mkdir_p("#{destination_cnf_dir}/#{helm_directory}")
- input_file = cli_args[:input_file]
- output_file = cli_args[:output_file]
config_path = CNFManager.ensure_cnf_testsuite_yml_path(config_file)
- # Input file present
- if input_file && !input_file.empty?
- config = CNFManager::Config.parse_config_yml(config_path, airgapped: true)
- tar_info = AirGap.tar_info_by_config_src(helm_chart)
- tgz_name = tar_info[:tar_name]
-
- # Input file absent, pulling chart
- else
- # Delete pre-existing tgz files
- files_to_delete = find_tgz_files(helm_chart)
- files_to_delete.each do |file|
- FileUtils.rm(file)
- Log.info { "Deleted: #{file}" }
- end
-
- # Pull new version
- helm_info = Helm.pull(helm_chart)
- unless helm_info[:status].success?
- puts "Helm pull error".colorize(:red)
- raise "Helm pull error"
- end
-
- config = CNFManager::Config.parse_config_yml(config_path, generate_tar_mode: output_file && !output_file.empty?)
+ # Pulling chart
+ # Delete pre-existing tgz files
+ files_to_delete = find_tgz_files(helm_chart)
+ files_to_delete.each do |file|
+ FileUtils.rm(file)
+ Log.info { "Deleted: #{file}" }
+ end
- # Discover newly pulled tgz file
- tgz_name = get_and_verify_tgz_name(helm_chart)
+ # Pull new version
+ helm_info = Helm.pull(helm_chart)
+ unless helm_info[:status].success?
+ puts "Helm pull error".colorize(:red)
+ raise "Helm pull error"
end
+ config = CNFManager::Config.parse_config_yml(config_path)
+ # Discover newly pulled tgz file
+ tgz_name = get_and_verify_tgz_name(helm_chart)
+
Log.info { "tgz_name: #{tgz_name}" }
TarClient.untar(tgz_name, "#{destination_cnf_dir}/exported_chart")
@@ -775,21 +739,11 @@ module CNFManager
#sample_setup({config_file: cnf_path, wait_count: wait_count})
def self.sample_setup(cli_args)
- #TODO accept offline mode
Log.info { "sample_setup cli_args: #{cli_args}" }
config_file = cli_args[:config_file]
wait_count = cli_args[:wait_count]
verbose = cli_args[:verbose]
- input_file = cli_args[:input_file]
- output_file = cli_args[:output_file]
- if input_file && !input_file.empty?
- # todo add generate and set tar as well
- config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file), airgapped: true)
- elsif output_file && !output_file.empty?
- config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file), generate_tar_mode: true)
- else
- config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file))
- end
+ config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file))
Log.debug { "config in sample_setup: #{config.cnf_config}" }
release_name = config.cnf_config[:release_name]
install_method = config.cnf_config[:install_method]
@@ -876,24 +830,7 @@ module CNFManager
elapsed_time = Time.measure do
case install_method[0]
when Helm::InstallMethod::ManifestDirectory
- # todo airgap_manifest_directory << prepare a manifest directory for deployment into an airgapped environment, put in airgap module
- if input_file && !input_file.empty?
- yaml_template_files = Find.find("#{destination_cnf_dir}/#{manifest_directory}",
- "*.yaml*", "100")
- yml_template_files = Find.find("#{destination_cnf_dir}/#{manifest_directory}",
- "*.yml*", "100")
- template_files = yaml_template_files + yml_template_files
- Log.info { "(before kubectl apply) calling image_pull_policy on #{template_files}" }
- template_files.map{|x| AirGap.image_pull_policy(x)}
- end
Log.for("verbose").info { "deploying by manifest file" } if verbose
- file_list = Helm::Manifest.manifest_file_list(install_method[1], silent: false)
- yml = Helm::Manifest.manifest_ymls_from_file_list(file_list)
- if input_file && !input_file.empty?
- image_pull(yml, "offline=true")
- else
- image_pull(yml, "offline=false")
- end
KubectlClient::Apply.file("#{destination_cnf_dir}/#{manifest_directory}")
when Helm::InstallMethod::HelmChart
if !helm_install_namespace.empty?
@@ -902,31 +839,11 @@ module CNFManager
else
helm_install_namespace = default_namespace
end
- if input_file && !input_file.empty?
- tar_info = AirGap.tar_info_by_config_src(config.cnf_config[:helm_chart])
- # prepare a helm_chart tar file for deployment into an airgapped environment, put in airgap module
- TarClient.modify_tar!(tar_info[:tar_name]) do |directory|
- template_files = Find.find(directory, "*.yaml*", "100")
- template_files.map{|x| AirGap.image_pull_policy(x)}
- end
- # if in airgapped mode, set helm_chart in config to be the tarball path
- helm_chart = tar_info[:tar_name]
- else
- helm_chart = config.cnf_config[:helm_chart]
- end
+ helm_chart = config.cnf_config[:helm_chart]
if !helm_repo_name.empty? || !helm_repo_url.empty?
Helm.helm_repo_add(helm_repo_name, helm_repo_url)
end
Log.for("verbose").info { "deploying with chart repository" } if verbose
- # Helm.template(release_name, install_method[1], output_file: "cnfs/temp_template.yml", values: helm_values)
- Helm.template(release_name, install_method[1], output_file: "cnfs/temp_template.yml", namespace: helm_install_namespace, values: helm_values)
- yml = Helm::Manifest.parse_manifest_as_ymls(template_file_name: "cnfs/temp_template.yml")
-
- if input_file && !input_file.empty?
- image_pull(yml, "offline=true")
- else
- image_pull(yml, "offline=false")
- end
begin
helm_install = Helm.install(release_name, helm_chart, helm_namespace_option, helm_values)
@@ -949,22 +866,8 @@ module CNFManager
helm_install_namespace = default_namespace
end
Log.for("verbose").info { "deploying with helm directory" } if verbose
- # prepare a helm directory for deployment into an airgapped environment, put in airgap module
- if input_file && !input_file.empty?
- template_files = Dir.glob(["#{destination_cnf_dir}/#{helm_directory}/*.yaml*"])
- template_files.map{|x| AirGap.image_pull_policy(x)}
- end
#TODO Add helm options into cnf-testsuite yml
#e.g. helm install nsm --set insecure=true ./nsm/helm_chart
- # Helm.template(release_name, install_method[1], output_file: "cnfs/temp_template.yml")
- Helm.template(release_name, install_method[1], output_file: "cnfs/temp_template.yml", namespace: helm_install_namespace, values: helm_values)
- yml = Helm::Manifest.parse_manifest_as_ymls(template_file_name: "cnfs/temp_template.yml")
-
- if input_file && !input_file.empty?
- image_pull(yml, "offline=true")
- else
- image_pull(yml, "offline=false")
- end
begin
# helm_install = Helm.install("#{release_name} #{destination_cnf_dir}/#{helm_directory} #{helm_namespace_option}")
@@ -1116,7 +1019,7 @@ module CNFManager
#todo uninstall/reinstall clustertools because of tshark bug
end
- def self.cnf_to_new_cluster(config, kubeconfig, offline=false)
+ def self.cnf_to_new_cluster(config, kubeconfig)
release_name = config.cnf_config[:release_name]
install_method = config.cnf_config[:install_method]
release_name = config.cnf_config[:release_name]
@@ -1145,12 +1048,6 @@ module CNFManager
KubectlClient::Apply.file("#{destination_cnf_dir}/#{manifest_directory}", kubeconfig: kubeconfig)
when Helm::InstallMethod::HelmChart
begin
- if offline
- chart_info = AirGap.tar_info_by_config_src(install_method[1])
- chart_name = chart_info[:chart_name]
- tar_name = chart_info[:tar_name]
- Log.info { "Install Chart In Airgapped Mode: Name: #{chart_name}, Tar: #{tar_name}" }
- end
helm_install = Helm.install("#{release_name} #{helm_chart} --kubeconfig #{kubeconfig} #{helm_namespace_option}")
rescue e : Helm::CannotReuseReleaseNameError
stdout_warning "Release name #{release_name} has already been setup."
diff --git a/src/tasks/utils/cnf_manager_airgap.cr b/src/tasks/utils/cnf_manager_airgap.cr
deleted file mode 100644
index 5f435bb1a..000000000
--- a/src/tasks/utils/cnf_manager_airgap.cr
+++ /dev/null
@@ -1,187 +0,0 @@
-require "./cnf_manager.cr"
-
-module CNFManager
- module CNFAirGap
- # LOGGING.info ./cnf-testsuite cnf_setup cnf-config=example-cnfs/coredns/cnf-testsuite.yml airgapped output-file=./tmp/airgapped.tar.gz
- # LOGGING.info ./cnf-testsuite cnf_setup cnf-config=example-cnfs/coredns/cnf-testsuite.yml output-file=./tmp/airgapped.tar.gz
- # LOGGING.info ./cnf-testsuite cnf_setup cnf-config=example-cnfs/coredns/cnf-testsuite.yml airgapped=./tmp/airgapped.tar.gz
- def self.generate_cnf_setup(config_file : String, output_file, cli_args)
- Log.info { "generate_cnf_setup cnf_config_file: #{config_file}" }
- FileUtils.mkdir_p("#{TarClient::TAR_IMAGES_DIR}")
- # todo create a way to call setup code for directories (cnf manager code)
- config = CNFManager.parsed_config_file(config_file)
- sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file), airgapped: false, generate_tar_mode: true)
- Log.info { "generate sandbox args: sandbox_config: #{sandbox_config}, cli_args: #{cli_args}" }
- CNFManager.sandbox_setup(sandbox_config, cli_args)
- install_method = CNFManager.cnf_installation_method(config)
- Log.info { "generate_cnf_setup images_from_config_src" }
-
- Log.info { "Download CRI Tools" }
- AirGap.download_cri_tools
-
- Log.info { "Add CRI Tools to Airgapped Tar: #{output_file}" }
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "bin/crictl-#{AirGap::CRI_VERSION}-linux-amd64.tar.gz")
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "bin/containerd-#{AirGap::CTR_VERSION}-linux-amd64.tar.gz")
-
- images = CNFManager::GenerateConfig.images_from_config_src(install_method[1], generate_tar_mode: true)
-
- # todo function that takes sandbox containers and extracts images (config images)
- container_names = sandbox_config.cnf_config[:container_names]
- #todo get image name (org name and image name) from config src
-
- if container_names
- config_images = [] of NamedTuple(image_name: String, tag: String)
- container_names.map do |c|
- Log.info { "container_names c: #{c}" }
- # todo get image name for container name
- image = images.find{|x| x[:container_name]==c["name"]}
- if image
- config_images << {image_name: image[:image_name], tag: c["rolling_update_test_tag"]}
- config_images << {image_name: image[:image_name], tag: c["rolling_downgrade_test_tag"]}
- config_images << {image_name: image[:image_name], tag: c["rolling_version_change_test_tag"]}
- config_images << {image_name: image[:image_name], tag: c["rollback_from_tag"]}
- end
- end
- else
- config_images = [] of NamedTuple(image_name: String, tag: String)
- end
- Log.info { "config_images: #{config_images}" }
-
- # todo function that accepts image names and tars them
- images = images + config_images
- images.map do |i|
- input_file = "#{TarClient::TAR_IMAGES_DIR}/#{i[:image_name].split("/")[-1]}_#{i[:tag]}.tar"
- Log.info { "input_file: #{input_file}" }
- image = "#{i[:image_name]}:#{i[:tag]}"
- DockerClient.pull(image)
- DockerClient.save(image, input_file)
- TarClient.append(output_file, "/tmp", "images/" + input_file.split("/")[-1])
- Log.info { "#{output_file} in generate_cnf_setup complete" }
- end
- # todo hardcode install method for helm charts until helm directories / manifest
- # directories are supported
- case install_method[0]
- when Helm::InstallMethod::HelmChart
- Log.debug { "helm_chart : #{install_method[1]}" }
- AirGap.tar_helm_repo(install_method[1], output_file)
- Log.info { "generate_cnf_setup tar_helm_repo complete" }
- # when Helm::InstallMethod::ManifestDirectory
- # LOGGING.debug "manifest_directory : #{install_method[1]}"
- # template_files = Find.find(directory, "*.yaml*", "100")
- # template_files.map{|x| AirGap.image_pull_policy(x)}
- end
- end
-
- #./cnf-testsuite airgapped -o ~/airgapped.tar.gz
- #./cnf-testsuite offline -o ~/airgapped.tar.gz
- #./cnf-testsuite offline -o ~/mydir/airgapped.tar.gz
- def self.generate(output_file : String = "./airgapped.tar.gz")
- Log.info { "cnf_manager generate" }
- FileUtils.rm_rf(output_file)
- FileUtils.mkdir_p("#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}")
- FileUtils.mkdir_p("#{AirGap::TAR_BINARY_DIR}")
-
- # todo put all of these setup elements into a configuration file.
- # todo get this images from helm charts.
-
- [{input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/kubectl.tar",
- image: "bitnami/kubectl:latest"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/chaos-mesh.tar",
- image: "pingcap/chaos-mesh:v1.2.1"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/chaos-daemon.tar",
- image: "pingcap/chaos-daemon:v1.2.1"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/chaos-dashboard.tar",
- image: "pingcap/chaos-dashboard:v1.2.1"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/chaos-kernel.tar",
- image: "pingcap/chaos-kernel:v1.2.1"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/pingcap-coredns.tar",
- image: "pingcap/coredns:v0.2.0"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/sonobuoy.tar",
- image: "docker.io/sonobuoy/sonobuoy:v0.19.0"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/sonobuoy-logs.tar",
- image: "docker.io/sonobuoy/systemd-logs:v0.3"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/litmus-operator.tar",
- image: "litmuschaos/chaos-operator:#{LitmusManager::Version}"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/litmus-runner.tar",
- image: "litmuschaos/chaos-runner:#{LitmusManager::Version}"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/go-runner.tar",
- image: "litmuschaos/go-runner:#{LitmusManager::Version}"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/kind-node.tar",
- image: "kindest/node:v1.21.1"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/gatekeeper.tar",
- image: "openpolicyagent/gatekeeper:v3.6.0"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/gatekeeper-crds.tar",
- image: "openpolicyagent/gatekeeper-crds:v3.6.0"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/tigera-operator.tar",
- image: "quay.io/tigera/operator:v1.20.4"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/calico-controller.tar",
- image: "calico/kube-controllers:v3.20.2"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/calico-flexvol.tar",
- image: "calico/pod2daemon-flexvol:v3.20.2"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/calico-cni.tar",
- image: "calico/cni:v3.20.2"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/calico-node.tar",
- image: "calico/node:v3.20.2"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/calico-typha.tar",
- image: "calico/typha:v3.20.2"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/cilium.tar",
- image: "cilium/cilium:v1.10.5"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/cilium-operator.tar",
- image: "cilium/operator-generic:v1.10.5"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/cri-tools.tar",
- image: "conformance/cri-tools:v1.0.0"},
- {input_file: "#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/prometheus.tar",
- image: "prom/prometheus:v2.18.1"}].map do |x|
- DockerClient.pull(x[:image])
- DockerClient.save(x[:image], x[:input_file])
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "bootstrap_images/" + x[:input_file].split("/")[-1])
- end
- # todo keep crictl and containerd tar files, move to the rest to cnf-test-suite specific bootstrap
- AirGap.tar_manifest("https://litmuschaos.github.io/litmus/litmus-operator-v#{LitmusManager::Version}.yaml", output_file)
- AirGap.tar_manifest("https://raw.githubusercontent.com/litmuschaos/chaos-operator/master/deploy/chaos_crds.yaml", output_file)
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-latency/experiment.yaml", output_file, prefix: "lat-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-latency/rbac.yaml", output_file, prefix: "lat-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-corruption/experiment.yaml", output_file, prefix: "corr-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-corruption/rbac.yaml", output_file, prefix: "corr-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-duplication/experiment.yaml", output_file, prefix: "dup-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-network-duplication/rbac.yaml", output_file, prefix: "dup-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-delete/experiment.yaml", output_file, prefix: "pod-delete-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-delete/rbac.yaml", output_file, prefix: "pod-delete-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-memory-hog/experiment.yaml", output_file, prefix: "pod-memory-hog-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-memory-hog/rbac.yaml", output_file, prefix: "pod-memory-hog-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-io-stress/experiment.yaml", output_file, prefix: "pod-io-stress-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/pod-io-stress/rbac.yaml", output_file, prefix: "pod-io-stress-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/disk-fill/experiment.yaml", output_file, prefix: "disk-fill-")
- AirGap.tar_manifest("https://hub.litmuschaos.io/api/chaos/#{LitmusManager::Version}?file=charts/generic/disk-fill/rbac.yaml", output_file, prefix: "disk-fill-")
- url = "https://github.com/vmware-tanzu/sonobuoy/releases/download/v#{SONOBUOY_K8S_VERSION}/sonobuoy_#{SONOBUOY_K8S_VERSION}_#{SONOBUOY_OS}_amd64.tar.gz"
- TarClient.tar_file_by_url(url, output_file, "sonobuoy.tar.gz")
- url = "https://github.com/armosec/kubescape/releases/download/v#{KUBESCAPE_VERSION}/kubescape-ubuntu-latest"
- TarClient.tar_file_by_url(url, output_file, "kubescape-ubuntu-latest")
- url = "https://github.com/kubernetes-sigs/kind/releases/download/v#{KIND_VERSION}/kind-linux-amd64"
- TarClient.tar_file_by_url(url, output_file, "kind")
- download_path = "download/"
- FileUtils.rm_rf("#{TarClient::TAR_TMP_BASE}/#{download_path}")
- FileUtils.mkdir_p("#{TarClient::TAR_TMP_BASE}/" + download_path)
- `./tools/kubescape/kubescape download framework nsa --output #{TarClient::TAR_DOWNLOAD_DIR}/nsa.json`
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "#{download_path}nsa.json")
- Helm.helm_repo_add("chaos-mesh", "https://charts.chaos-mesh.org")
- # todo create helm chart configuration yaml that includes all chart elements for specs
- AirGap.tar_helm_repo("chaos-mesh/chaos-mesh --version 0.5.1", output_file)
- Helm.helm_repo_add("gatekeeper","https://open-policy-agent.github.io/gatekeeper/charts")
-
- # Calico Helm Relase Can't be Pinned, Download Directly
- # Helm.helm_repo_add("projectcalico","https://docs.projectcalico.org/charts")
- # AirGap.tar_helm_repo("projectcalico/tigera-operator --version v3.20.2", output_file)
-
- FileUtils.mkdir_p("#{TarClient::TAR_TMP_BASE}/repositories/projectcalico_tigera-operator")
- Helm.fetch("https://github.com/projectcalico/calico/releases/download/v3.20.2/tigera-operator-v3.20.2-1.tgz -d #{TarClient::TAR_TMP_BASE}/repositories/projectcalico_tigera-operator")
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "repositories/projectcalico_tigera-operator")
- FileUtils.rm_rf("#{TarClient::TAR_TMP_BASE}/repositories/projectcalico_tigera-operator")
-
- Helm.helm_repo_add("cilium","https://helm.cilium.io/")
- AirGap.tar_helm_repo("cilium/cilium --version 1.10.5", output_file)
- AirGap.tar_helm_repo("gatekeeper/gatekeeper --version 3.6.0", output_file)
- AirGap.generate(output_file, append: true)
- end
- end
-end
diff --git a/src/tasks/utils/config.cr b/src/tasks/utils/config.cr
index 10985576a..804ea9e82 100644
--- a/src/tasks/utils/config.cr
+++ b/src/tasks/utils/config.cr
@@ -9,9 +9,8 @@ require "./task.cr"
module CNFManager
class Config
- def initialize(cnf_config, airgapped=false)
+ def initialize(cnf_config)
@cnf_config = cnf_config
- @airgapped = airgapped
end
#when addeding to this you must add to task.cr's CNFManager::Config.new(
property cnf_config : NamedTuple(destination_cnf_dir: String,
@@ -61,17 +60,15 @@ module CNFManager
),
image_registry_fqdns: Hash(String, String ) | Nil)
- def self.parse_config_yml(config_yml_path : String, airgapped=false, generate_tar_mode=false) : CNFManager::Config
+ def self.parse_config_yml(config_yml_path : String) : CNFManager::Config
LOGGING.debug "parse_config_yml config_yml_path: #{config_yml_path}"
- LOGGING.info "airgapped: #{airgapped}"
- LOGGING.info "generate_tar_mode: #{generate_tar_mode}"
yml_file = CNFManager.ensure_cnf_testsuite_yml_path(config_yml_path)
#TODO modify the destination testsuite yml instead of the source testsuite yml
# (especially in the case of the release manager). Then reread the destination config
# TODO for cleanup, read source, then find destination and use release name from destination config
# TODO alternatively use a CRD to save the release name
- CNFManager.generate_and_set_release_name(config_yml_path, airgapped, generate_tar_mode)
+ CNFManager.generate_and_set_release_name(config_yml_path)
config = CNFManager.parsed_config_file(yml_file)
install_method = CNFManager.cnf_installation_method(config)
@@ -223,21 +220,21 @@ module CNFManager
def self.install_method_by_config_file(config_file) : Helm::InstallMethod
LOGGING.info "install_data_by_config_file"
config = CNFManager.parsed_config_file(config_file)
- sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file), airgapped: true, generate_tar_mode: false)
+ sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file))
install_method = CNFManager.cnf_installation_method(config)
install_method[0]
end
def self.config_src_by_config_file(config_file) : String
LOGGING.info "install_data_by_config_file"
config = CNFManager.parsed_config_file(config_file)
- sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file), airgapped: true, generate_tar_mode: false)
+ sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file))
install_method = CNFManager.cnf_installation_method(config)
install_method[1]
end
def self.release_name_by_config_file(config_file) : String
LOGGING.info "release_name_by_config_file"
config = CNFManager.parsed_config_file(config_file)
- sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file), airgapped: true, generate_tar_mode: false)
+ sandbox_config = CNFManager::Config.parse_config_yml(CNFManager.ensure_cnf_testsuite_yml_path(config_file))
release_name = sandbox_config.cnf_config[:release_name]
end
end
diff --git a/src/tasks/utils/generate_config.cr b/src/tasks/utils/generate_config.cr
index 465b7accd..56c38b4d9 100644
--- a/src/tasks/utils/generate_config.cr
+++ b/src/tasks/utils/generate_config.cr
@@ -8,18 +8,11 @@ require "./task.cr"
module CNFManager
module GenerateConfig
-
-
- def self.export_manifest(config_src, output_file="./cnf-testsuite.yml", airgapped=false, generate_tar_mode=false)
+ def self.export_manifest(config_src, output_file="./cnf-testsuite.yml")
LOGGING.info "export_manifest"
LOGGING.info "export_manifest config_src: #{config_src}"
- LOGGING.info "airgapped: #{airgapped}"
- LOGGING.info "generate_tar_mode: #{generate_tar_mode}"
generate_initial_testsuite_yml(config_src, output_file)
- CNFManager.generate_and_set_release_name(output_file,
- airgapped: airgapped,
- generate_tar_mode: generate_tar_mode,
- src_mode: true)
+ CNFManager.generate_and_set_release_name(output_file, src_mode: true)
config = CNFManager.parsed_config_file(output_file)
release_name = optional_key_as_string(config, "release_name")
install_method = CNFManager.install_method_by_config_src(config_src)
@@ -38,13 +31,11 @@ module CNFManager
#get list of image:tags from helm chart/helm directory/manifest file
#note: config_src must be an absolute path if a directory, todo: make this more resilient
- def self.images_from_config_src(config_src, airgapped=false, generate_tar_mode=false)
+ def self.images_from_config_src(config_src)
LOGGING.info "images_from_config_src"
- LOGGING.info "airgapped: #{airgapped}"
- LOGGING.info "generate_tar_mode: #{generate_tar_mode}"
#return container image name/tag
ret_containers = [] of NamedTuple(container_name: String, image_name: String, tag: String)
- resource_ymls = CNFManager::GenerateConfig.export_manifest(config_src, airgapped: airgapped, generate_tar_mode: generate_tar_mode)
+ resource_ymls = CNFManager::GenerateConfig.export_manifest(config_src)
resource_resp = resource_ymls.map do | resource |
LOGGING.info "gen config resource: #{resource}"
unless resource["kind"].as_s.downcase == "service" ## services have no containers
diff --git a/src/tasks/utils/image_prepull.cr b/src/tasks/utils/image_prepull.cr
deleted file mode 100644
index 1134caa37..000000000
--- a/src/tasks/utils/image_prepull.cr
+++ /dev/null
@@ -1,36 +0,0 @@
-require "kubectl_client"
-require "airgap"
-
-# todo put this in bootstrap utils
-def self.image_pull(yml, offline)
- Log.info { "image_pull func" }
- containers = yml.map { |y|
- mc = Helm::Manifest.manifest_containers(y)
- mc.as_a? if mc
- }.flatten.compact
-
- images = containers.flatten.map {|x|
- LOGGING.debug "container x: #{x}"
- image = x.dig?("image")
- # if image
- # LOGGING.debug "image: #{image.as_s}"
- # parsed_image = DockerClient.parse_image(image.as_s)
- # image = "#{parsed_image["complete_fqdn"]}"
- # end
- image
- }.compact
- LOGGING.info "Images: #{images}"
-
- # todo put this in bootstrap utils
- unless offline
- resp = AirGap.create_pod_by_image("conformance/cri-tools:latest", "cri-tools")
- end
-
- images.map do |image|
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
- pods = KubectlClient::Get.pods_by_label(pods, "name", "cri-tools")
- pods.map do |pod|
- KubectlClient.exec("#{pod.dig?("metadata", "name")} -- crictl pull #{image}")
- end
- end
-end
diff --git a/src/tasks/workload/compatibility.cr b/src/tasks/workload/compatibility.cr
index 403c4ba80..85d5e0b34 100644
--- a/src/tasks/workload/compatibility.cr
+++ b/src/tasks/workload/compatibility.cr
@@ -509,45 +509,19 @@ task "validate_config" do |_, args|
end
end
-def setup_calico_cluster(cluster_name : String, offline : Bool) : KindManager::Cluster
- if offline
- Log.info { "Running cni_compatible(Cluster Creation) in Offline Mode" }
-
- chart_directory = "#{TarClient::TAR_REPOSITORY_DIR}/projectcalico_tigera-operator"
- chart = Dir.entries("#{chart_directory}")[1]
- status = `docker image load -i #{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}/kind-node.tar`
- Log.info { "#{status}" }
- Log.info { "Installing Airgapped CNI Chart: #{chart_directory}/#{chart}" }
- calico_cluster = KindManager.create_cluster_with_chart_and_wait(
- cluster_name,
- KindManager.disable_cni_config,
- "#{chart_directory}/#{chart} --namespace calico",
- offline
- )
- ENV["KUBECONFIG"]="#{calico_cluster.kubeconfig}"
- #TODO Don't bootstrap all images, only Calico & Cilium are needed.
- if Dir.exists?("#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}")
- AirGap.cache_images(kind_name: "calico-test-control-plane" )
- AirGap.cache_images(cnf_setup: true, kind_name: "calico-test-control-plane" )
- else
- puts "Bootstrap directory is missing, please run ./cnf-testsuite setup offline=".colorize(:red)
- raise "Bootstrap directory is missing, please run ./cnf-testsuite setup offline="
- end
- else
- Log.info { "Running cni_compatible(Cluster Creation) in Online Mode" }
- Helm.helm_repo_add("projectcalico","https://docs.projectcalico.org/charts")
- calico_cluster = KindManager.create_cluster_with_chart_and_wait(
- cluster_name,
- KindManager.disable_cni_config,
- "projectcalico/tigera-operator --version v3.20.2",
- offline
- )
- end
+def setup_calico_cluster(cluster_name : String) : KindManager::Cluster
+ Log.info { "Running cni_compatible(Cluster Creation)" }
+ Helm.helm_repo_add("projectcalico","https://docs.projectcalico.org/charts")
+ calico_cluster = KindManager.create_cluster_with_chart_and_wait(
+ cluster_name,
+ KindManager.disable_cni_config,
+ "projectcalico/tigera-operator --version v3.20.2"
+ )
return calico_cluster
end
-def setup_cilium_cluster(cluster_name : String, offline : Bool) : KindManager::Cluster
+def setup_cilium_cluster(cluster_name : String) : KindManager::Cluster
chart_opts = [
"--set operator.replicas=1",
"--set image.repository=cilium/cilium",
@@ -557,30 +531,11 @@ def setup_cilium_cluster(cluster_name : String, offline : Bool) : KindManager::C
]
kind_manager = KindManager.new
- cluster = kind_manager.create_cluster(cluster_name, KindManager.disable_cni_config, offline)
-
- if offline
- chart_directory = "#{TarClient::TAR_REPOSITORY_DIR}/cilium_cilium"
- chart = Dir.entries("#{chart_directory}")[2]
- Log.info { "Installing Airgapped CNI Chart: #{chart_directory}/#{chart}" }
-
- chart = "#{chart_directory}/#{chart}"
- Helm.install("#{cluster_name}-plugin #{chart} #{chart_opts.join(" ")} --namespace kube-system --kubeconfig #{cluster.kubeconfig}")
-
- ENV["KUBECONFIG"]="#{cluster.kubeconfig}"
- if Dir.exists?("#{AirGap::TAR_BOOTSTRAP_IMAGES_DIR}")
- AirGap.cache_images(kind_name: "cilium-test-control-plane" )
- AirGap.cache_images(cnf_setup: true, kind_name: "cilium-test-control-plane" )
- else
- puts "Bootstrap directory is missing, please run ./cnf-testsuite setup offline=".colorize(:red)
- raise "Bootstrap directory is missing, please run ./cnf-testsuite setup offline="
- end
- else
- Helm.helm_repo_add("cilium","https://helm.cilium.io/")
- chart = "cilium/cilium"
- chart_opts.push("--version 1.15.4")
- Helm.install("#{cluster_name}-plugin #{chart} #{chart_opts.join(" ")} --namespace kube-system --kubeconfig #{cluster.kubeconfig}")
- end
+ cluster = kind_manager.create_cluster(cluster_name, KindManager.disable_cni_config)
+ Helm.helm_repo_add("cilium","https://helm.cilium.io/")
+ chart = "cilium/cilium"
+ chart_opts.push("--version 1.15.4")
+ Helm.install("#{cluster_name}-plugin #{chart} #{chart_opts.join(" ")} --namespace kube-system --kubeconfig #{cluster.kubeconfig}")
cluster.wait_until_pods_ready()
Log.info { "cilium kubeconfig: #{cluster.kubeconfig}" }
@@ -595,19 +550,14 @@ task "cni_compatible" do |t, args|
ensure_kubeconfig!
kubeconfig_orig = ENV["KUBECONFIG"]
begin
- if args.named["offline"]? && args.named["offline"]? != "false"
- offline = true
- else
- offline = false
- end
- calico_cluster = setup_calico_cluster("calico-test", offline)
+ calico_cluster = setup_calico_cluster("calico-test")
Log.info { "calico kubeconfig: #{calico_cluster.kubeconfig}" }
- calico_cnf_passed = CNFManager.cnf_to_new_cluster(config, calico_cluster.kubeconfig, offline)
+ calico_cnf_passed = CNFManager.cnf_to_new_cluster(config, calico_cluster.kubeconfig)
Log.info { "calico_cnf_passed: #{calico_cnf_passed}" }
puts "CNF failed to install on Calico CNI cluster".colorize(:red) unless calico_cnf_passed
- cilium_cluster = setup_cilium_cluster("cilium-test", offline)
- cilium_cnf_passed = CNFManager.cnf_to_new_cluster(config, cilium_cluster.kubeconfig, offline)
+ cilium_cluster = setup_cilium_cluster("cilium-test")
+ cilium_cnf_passed = CNFManager.cnf_to_new_cluster(config, cilium_cluster.kubeconfig)
Log.info { "cilium_cnf_passed: #{cilium_cnf_passed}" }
puts "CNF failed to install on Cilium CNI cluster".colorize(:red) unless cilium_cnf_passed
diff --git a/src/tasks/workload/configuration.cr b/src/tasks/workload/configuration.cr
index e17bbca82..2dc5e2de9 100644
--- a/src/tasks/workload/configuration.cr
+++ b/src/tasks/workload/configuration.cr
@@ -277,11 +277,6 @@ task "hardcoded_ip_addresses_in_k8s_runtime_configuration" do |t, args|
KubectlClient::Create.command("namespace hardcoded-ip-test")
unless helm_chart.empty?
- if args.named["offline"]?
- info = AirGap.tar_info_by_config_src(helm_chart)
- Log.for(t.name).info { "airgapped mode info: #{info}" }
- helm_chart = info[:tar_name]
- end
helm_install = Helm.install("--namespace hardcoded-ip-test hardcoded-ip-test #{helm_chart} --dry-run --debug > #{helm_chart_yml_path}")
else
helm_install = Helm.install("--namespace hardcoded-ip-test hardcoded-ip-test #{destination_cnf_dir}/#{helm_directory} --dry-run --debug > #{helm_chart_yml_path}")
@@ -623,17 +618,10 @@ task "alpha_k8s_apis" do |t, args|
ensure_kubeconfig!
kubeconfig_orig = ENV["KUBECONFIG"]
- # No offline support for this task for now
- if args.named["offline"]? && args.named["offline"]? != "false"
- next CNFManager::TestcaseResult.new(CNFManager::ResultStatus::Skipped, "alpha_k8s_apis chaos test skipped")
- end
-
# Get kubernetes version of the current server.
# This is used to setup kind with same k8s image version.
k8s_server_version = KubectlClient.server_version
- # Online mode workflow below
- offline = false
cluster_name = "apisnooptest"
# Ensure any old cluster is deleted
KindManager.new.delete_cluster(cluster_name)
@@ -643,7 +631,7 @@ task "alpha_k8s_apis" do |t, args|
Log.info { "apisnoop cluster kubeconfig: #{cluster.kubeconfig}" }
ENV["KUBECONFIG"] = "#{cluster.kubeconfig}"
- cnf_setup_complete = CNFManager.cnf_to_new_cluster(config, cluster.kubeconfig, offline)
+ cnf_setup_complete = CNFManager.cnf_to_new_cluster(config, cluster.kubeconfig)
# CNF setup failed on kind cluster. Inform in test output.
unless cnf_setup_complete
diff --git a/src/tasks/workload/observability.cr b/src/tasks/workload/observability.cr
index 3942c4213..51d94ce27 100644
--- a/src/tasks/workload/observability.cr
+++ b/src/tasks/workload/observability.cr
@@ -41,7 +41,6 @@ end
desc "Does the CNF emit prometheus traffic"
task "prometheus_traffic" do |t, args|
- next if args.named["offline"]?
task_response = CNFManager::Task.task_runner(args, task: t) do |args, config|
release_name = config.cnf_config[:release_name]
destination_cnf_dir = config.cnf_config[:destination_cnf_dir]
@@ -159,7 +158,6 @@ end
desc "Does the CNF emit prometheus open metric compatible traffic"
task "open_metrics", ["prometheus_traffic"] do |t, args|
- next if args.named["offline"]?
task_response = CNFManager::Task.task_runner(args, task: t) do |args, config|
release_name = config.cnf_config[:release_name]
configmap = KubectlClient::Get.configmap("cnf-testsuite-#{release_name}-open-metrics")
@@ -181,7 +179,6 @@ end
desc "Are the CNF's logs captured by a logging system"
task "routed_logs", ["install_cluster_tools"] do |t, args|
- next if args.named["offline"]?
task_response = CNFManager::Task.task_runner(args, task: t) do |args, config|
match = FluentManager.find_active_match
unless match
@@ -218,8 +215,6 @@ task "tracing" do |t, args|
Log.for(t.name).info { "Running test" }
Log.for(t.name).info { "tracing args: #{args.inspect}" }
- next if args.named["offline"]?
-
cnf_config_ok = check_cnf_config(args) || CNFManager.destination_cnfs_exist?
CNFManager::Task.task_runner(args, task: t) do |args, config|
if cnf_config_ok
diff --git a/src/tasks/workload/reliability.cr b/src/tasks/workload/reliability.cr
index 618286f29..52ff6ac08 100644
--- a/src/tasks/workload/reliability.cr
+++ b/src/tasks/workload/reliability.cr
@@ -126,26 +126,17 @@ task "pod_network_latency", ["install_litmus"] do |t, args|
if test_passed
Log.info { "Running for: #{spec_labels}"}
Log.info { "Spec Hash: #{args.named["pod_labels"]?}" }
- if args.named["offline"]?
- Log.info { "install resilience offline mode" }
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/lat-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/lat-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/lat-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-network-latency/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-network-latency/rbac.yaml"
-
-
-
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-network-latency/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-network-latency/rbac.yaml"
+
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
#TODO Use Labels to Annotate, not resource["name"]
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
@@ -206,24 +197,16 @@ task "pod_network_corruption", ["install_litmus"] do |t, args|
test_passed = false
end
if test_passed
- if args.named["offline"]?
- Log.info {"install resilience offline mode"}
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/corr-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/corr-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/corr-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-network-corruption/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-network-corruption/rbac.yaml"
-
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
-
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-network-corruption/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-network-corruption/rbac.yaml"
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
+
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "pod-network-corruption"
@@ -268,24 +251,18 @@ task "pod_network_duplication", ["install_litmus"] do |t, args|
test_passed = false
end
if test_passed
- if args.named["offline"]?
- Log.info {"install resilience offline mode"}
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/dup-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/dup-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/dup-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-network-duplication/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-network-duplication/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-network-duplication/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-network-duplication/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "pod-network-duplication"
@@ -328,24 +305,18 @@ task "disk_fill", ["install_litmus"] do |t, args|
test_passed = false
end
if test_passed
- if args.named["offline"]?
- Log.info { "install resilience offline mode" }
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/disk-fill-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/disk-fill-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/disk-fill-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/disk-fill/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/disk-fill/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/disk-fill/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/disk-fill/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "disk-fill"
@@ -418,26 +389,19 @@ task "pod_delete", ["install_litmus"] do |t, args|
if test_passed
Log.info { "Running for: #{spec_labels}"}
Log.info { "Spec Hash: #{args.named["pod_labels"]?}" }
- if args.named["offline"]?
- Log.info { "install resilience offline mode" }
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/pod-delete-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-delete-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-delete-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-delete/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-delete/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-delete/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-delete/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
- KubectlClient::Apply.file(rbac_path)
- end
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ KubectlClient::Apply.file(rbac_path)
Log.info { "resource: #{resource["name"]}" }
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
@@ -499,24 +463,18 @@ task "pod_memory_hog", ["install_litmus"] do |t, args|
test_passed = false
end
if test_passed
- if args.named["offline"]?
- Log.info { "install resilience offline mode" }
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/pod-memory-hog-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-memory-hog-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-memory-hog-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-memory-hog/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-memory-hog/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-memory-hog/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-memory-hog/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "pod-memory-hog"
@@ -563,24 +521,18 @@ task "pod_io_stress", ["install_litmus"] do |t, args|
test_passed = false
end
if test_passed
- if args.named["offline"]?
- Log.info { "install resilience offline mode" }
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/pod-io-stress-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-io-stress-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-io-stress-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-io-stress/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-io-stress/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-io-stress/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-io-stress/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "pod-io-stress"
@@ -634,24 +586,18 @@ task "pod_dns_error", ["install_litmus"] do |t, args|
test_passed = false
end
if test_passed
- if args.named["offline"]?
- Log.info { "install resilience offline mode" }
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/pod-dns-error-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-dns-error-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/pod-dns-error-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-dns-error/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-dns-error/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/pod-dns-error/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/pod-dns-error/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "pod-dns-error"
diff --git a/src/tasks/workload/security.cr b/src/tasks/workload/security.cr
index 46a2b2c87..d90207232 100644
--- a/src/tasks/workload/security.cr
+++ b/src/tasks/workload/security.cr
@@ -265,8 +265,6 @@ end
desc "Check if security services are being used to harden the application"
task "linux_hardening", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Linux hardening")
@@ -286,8 +284,6 @@ end
desc "Check if the containers have insecure capabilities."
task "insecure_capabilities", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Insecure capabilities")
@@ -307,8 +303,6 @@ end
desc "Check if the containers have CPU limits set"
task "cpu_limits", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Ensure CPU limits are set")
@@ -328,8 +322,6 @@ end
desc "Check if the containers have memory limits set"
task "memory_limits", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Ensure memory limits are set")
@@ -349,8 +341,6 @@ end
desc "Check Ingress and Egress traffic policy"
task "ingress_egress_blocked", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Ingress and Egress blocked")
@@ -370,8 +360,6 @@ end
desc "Check the Host PID/IPC privileges of the containers"
task "host_pid_ipc_privileges", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Host PID/IPC privileges")
@@ -391,8 +379,6 @@ end
desc "Check if the containers are running with non-root user with non-root group membership"
task "non_root_containers", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Non-root containers")
@@ -412,8 +398,6 @@ end
desc "Check that privileged containers are not used"
task "privileged_containers", ["kubescape_scan" ] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Privileged container")
@@ -434,8 +418,6 @@ end
desc "Check if containers have immutable file systems"
task "immutable_file_systems", ["kubescape_scan"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
results_json = Kubescape.parse
test_json = Kubescape.test_by_test_name(results_json, "Immutable container filesystem")
@@ -455,8 +437,6 @@ end
desc "Check if containers have hostPath mounts"
task "hostpath_mounts", ["install_kubescape"] do |t, args|
- next if args.named["offline"]?
-
CNFManager::Task.task_runner(args, task: t) do |args, config|
kubescape_control_id = "C-0048"
Kubescape.scan(control_id: kubescape_control_id)
diff --git a/src/tasks/workload/state.cr b/src/tasks/workload/state.cr
index c6b7fce1a..83695decf 100644
--- a/src/tasks/workload/state.cr
+++ b/src/tasks/workload/state.cr
@@ -289,36 +289,25 @@ task "node_drain", ["install_litmus"] do |t, args|
litmus_nodes = node_names - ["#{litmus_nodeName}"]
Log.info { "Schedulable Litmus Nodes: #{litmus_nodes}" }
- HttpHelper.download("#{LitmusManager::ONLINE_LITMUS_OPERATOR}","#{LitmusManager::DOWNLOADED_LITMUS_FILE}")
- if args.named["offline"]?
- Log.info {"Re-Schedule Litmus in offline mode"}
- LitmusManager.add_node_selector(litmus_nodes[0], airgap: true)
- else
- Log.info {"Re-Schedule Litmus in online mode"}
- LitmusManager.add_node_selector(litmus_nodes[0], airgap: false)
- end
+ HttpHelper.download("#{LitmusManager::LITMUS_OPERATOR}","#{LitmusManager::DOWNLOADED_LITMUS_FILE}")
+ Log.info {"Re-Schedule Litmus"}
+ LitmusManager.add_node_selector(litmus_nodes[0])
KubectlClient::Apply.file("#{LitmusManager::MODIFIED_LITMUS_FILE}")
KubectlClient::Get.resource_wait_for_install(kind: "Deployment", resource_name: "chaos-operator-ce", wait_count: 180, namespace: "litmus")
end
- if args.named["offline"]?
- Log.info {"install resilience offline mode"}
- AirGap.image_pull_policy("#{OFFLINE_MANIFESTS_PATH}/node-drain-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/node-drain-experiment.yaml")
- KubectlClient::Apply.file("#{OFFLINE_MANIFESTS_PATH}/node-drain-rbac.yaml")
- else
- experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/node-drain/fault.yaml"
- rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/node-drain/rbac.yaml"
+ experiment_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::Version}/faults/kubernetes/node-drain/fault.yaml"
+ rbac_url = "https://raw.githubusercontent.com/litmuschaos/chaos-charts/#{LitmusManager::RBAC_VERSION}/charts/generic/node-drain/rbac.yaml"
- experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
- KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
+ experiment_path = LitmusManager.download_template(experiment_url, "#{t.name}_experiment.yaml")
+ KubectlClient::Apply.file(experiment_path, namespace: app_namespace)
- rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
- rbac_yaml = File.read(rbac_path)
- rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
- File.write(rbac_path, rbac_yaml)
- KubectlClient::Apply.file(rbac_path)
- end
+ rbac_path = LitmusManager.download_template(rbac_url, "#{t.name}_rbac.yaml")
+ rbac_yaml = File.read(rbac_path)
+ rbac_yaml = rbac_yaml.gsub("namespace: default", "namespace: #{app_namespace}")
+ File.write(rbac_path, rbac_yaml)
+ KubectlClient::Apply.file(rbac_path)
+
KubectlClient::Annotate.run("--overwrite -n #{app_namespace} deploy/#{resource["name"]} litmuschaos.io/chaos=\"true\"")
chaos_experiment_name = "node-drain"
diff --git a/utils/airgap/airgap.cr b/utils/airgap/airgap.cr
deleted file mode 100644
index c41339128..000000000
--- a/utils/airgap/airgap.cr
+++ /dev/null
@@ -1,405 +0,0 @@
-# coding: utf-8
-require "totem"
-require "colorize"
-# require "./tar.cr"
-require "tar"
-require "find"
-require "docker_client"
-require "kubectl_client"
-# require "./airgap_utils.cr"
-require "file_utils"
-require "ecr"
-
-class CriToolsTemplate
- def initialize(@name : String, @image : String)
- end
-
- ECR.def_to_s "#{__DIR__}/cri-tools-template.yml.ecr"
-end
-
-# todo put in a separate library. it shold go under ./tools for now
-module AirGap
- CRI_VERSION="v1.17.0"
- CTR_VERSION="1.5.0"
- TAR_BOOTSTRAP_IMAGES_DIR = "/tmp/bootstrap_images"
- TAR_REPOSITORY_DIR = "/tmp/repositories"
- TAR_BINARY_DIR = "/tmp/binaries"
-
- def self.tar_helm_repo(command, output_file : String = "./airgapped.tar.gz")
- Log.info { "tar_helm_repo command: #{command} output_file: #{output_file}" }
- tar_dir = AirGap.helm_tar_dir(command)
- FileUtils.mkdir_p(tar_dir)
- Helm.fetch("#{command} -d #{tar_dir}")
- Log.debug { "ls #{tar_dir}:" + "#{Dir.children(tar_dir)}" }
- info = AirGap.tar_info_by_config_src(command)
- repo = info[:repo]
- repo_dir = info[:repo_dir]
- chart_name = info[:chart_name]
- repo_path = info[:repo_path]
- tar_dir = info[:tar_dir]
- tar_name = info[:tar_name]
-
- TarClient.modify_tar!(tar_name) do |directory|
- template_files = Find.find(directory, "*.yaml*", "100")
- template_files.map{|x| AirGap.image_pull_policy(x)}
- end
- TarClient.append(output_file, "/tmp", "#{repo_path}")
- ensure
- FileUtils.rm_rf("/tmp/#{repo_path}")
- end
-
- def self.tar_manifest(url, output_file : String = "./airgapped.tar.gz", prefix="")
- manifest_path = "manifests/"
- FileUtils.rm_rf("/tmp/#{manifest_path}")
- FileUtils.mkdir_p("/tmp/" + manifest_path)
- manifest_name = prefix + url.split("/").last
- manifest_full_path = manifest_path + manifest_name
- Log.info { "manifest_name: #{manifest_name}" }
- Log.info { "manifest_full_path: #{manifest_full_path}" }
- download_path = "/tmp/" + manifest_full_path
- HttpHelper.download("#{url}", "#{download_path}")
-
- # Halite.get("#{url}") do |response|
- # File.open("/tmp/" + manifest_full_path, "w") do |file|
- # IO.copy(response.body_io, file)
- # end
- # end
-
- TarClient.append(output_file, "/tmp", manifest_full_path)
- ensure
- FileUtils.rm_rf("/tmp/#{manifest_path}")
- end
-
- #./cnf-testsuite airgapped -o ~/airgapped.tar.gz
- #./cnf-testsuite offline -o ~/airgapped.tar.gz
- #./cnf-testsuite offline -o ~/mydir/airgapped.tar.gz
- def self.generate(output_file : String = "./airgapped.tar.gz", append=false)
- FileUtils.rm_rf(output_file) unless append
- FileUtils.mkdir_p("#{TAR_BOOTSTRAP_IMAGES_DIR}")
- AirGap.download_cri_tools
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "bin/crictl-#{CRI_VERSION}-linux-amd64.tar.gz")
- TarClient.append(output_file, TarClient::TAR_TMP_BASE, "bin/containerd-#{CTR_VERSION}-linux-amd64.tar.gz")
- end
-
- #./cnf-testsuite setup --offline=./airgapped.tar.gz
- def self.extract(output_file : String = "./airgapped.tar.gz", output_dir="/tmp")
- Log.info { "extract" }
- TarClient.untar(output_file, output_dir)
- end
-
- def self.cache_images(cnf_setup=false, kind_name=false)
- Log.info { "cache_images" }
- unless kind_name
- AirGap.bootstrap_cluster()
- end
- #TODO Potentially remove this.
- if ENV["CRYSTAL_ENV"]? == "TEST"
- # todo change chaos-mesh tar to something more generic
- image_files = ["#{TAR_BOOTSTRAP_IMAGES_DIR}/kubectl.tar",
- "#{TAR_BOOTSTRAP_IMAGES_DIR}/chaos-mesh.tar"]
- tar_image_files = Find.find("#{TarClient::TAR_IMAGES_DIR}", "*.tar*")
- image_files = image_files + tar_image_files + Find.find("#{TarClient::TAR_IMAGES_DIR}", "*.tgz*")
- else
- if cnf_setup
- tar_image_files = Find.find("#{TarClient::TAR_IMAGES_DIR}", "*.tar*")
- image_files = tar_image_files + Find.find("#{TarClient::TAR_IMAGES_DIR}", "*.tgz*")
- else
- tar_image_files = Find.find("#{TAR_BOOTSTRAP_IMAGES_DIR}", "*.tar*")
- image_files = tar_image_files + Find.find("#{TAR_BOOTSTRAP_IMAGES_DIR}", "*.tgz*")
- end
- end
- Log.info { "publishing: #{image_files}" }
- resp = image_files.map {|x| AirGap.publish_tarball(x, kind_name)}
- Log.debug { "resp: #{resp}" }
- resp
- end
-
- # # TODO add tar binary to prereqs/documentation
- def self.bootstrap_cluster
- pods = AirGap.pods_with_tar()
- Log.info { "TAR POD: #{pods}" }
- tar_pod_name = pods[0].dig?("metadata", "name") if pods[0]?
- Log.info { "TAR POD NAME: #{tar_pod_name}" }
- unless tar_pod_name
- Log.info { "NO TAR POD, CHECKING FOR PODS WITH SHELL" }
- pods = AirGap.pods_with_sh()
- no_tar = true
- end
- #TODO Ensure images found are available on all schedulable nodes on the cluster.
- images = AirGap.pod_images(pods)
- if images.empty?
- raise "No images with Tar or Shell found. Please deploy a Pod with Tar or Shell to your cluster."
- end
- resp = AirGap.create_pod_by_image(images[0], "cri-tools")
-
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
- pods = KubectlClient::Get.pods_by_label(pods, "name", "cri-tools")
- KubectlClient::Get.wait_for_critools
-
- cri_tools_pod_name = pods[0].dig?("metadata", "name") if pods[0]?
- if no_tar
- Log.info { "NO TAR POD, COPYING TAR FROM HOST" }
- tar_path = AirGap.check_tar(cri_tools_pod_name, namespace: "default", pod: false)
- pods.map do |pod|
- KubectlClient.exec("#{pod.dig?("metadata", "name")} -- cp #{tar_path} /usr/local/bin/")
- status = KubectlClient.exec("#{pod.dig?("metadata", "name")} -- /usr/local/bin/tar --version")
- unless status[:status].success?
- raise "No images with Tar or Shell found. Please deploy a Pod with Tar or Shell to your cluster."
- end
- end
- end
- AirGap.install_cri_binaries(pods)
- end
-
- def self.publish_tarball(tarball, kind_name=false)
- unless kind_name
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
- pods = KubectlClient::Get.pods_by_label(pods, "name", "cri-tools")
- pods.map do |pod|
- pod_name = pod.dig?("metadata", "name")
- KubectlClient.cp("#{tarball} #{pod_name}:/tmp/#{tarball.split("/")[-1]}")
- end
- pods.map do |pod|
- pod_name = pod.dig?("metadata", "name")
- resp = KubectlClient.exec("#{pod_name} -- ctr -n=k8s.io image import /tmp/#{tarball.split("/")[-1]}")
- Log.debug { "Resp: #{resp}" }
- resp
- end
- else
- DockerClient.cp("#{tarball} #{kind_name}:/#{tarball.split("/")[-1]}")
- #DockerClient.exec("#{kind_name} ctr -n=k8s.io image import /#{tarball.split("/")[-1]}")
- `docker exec #{kind_name} ctr -n=k8s.io image import /#{tarball.split("/")[-1]}`
- end
- end
-
- def self.download_cri_tools
- FileUtils.mkdir_p("#{TarClient::TAR_BIN_DIR}")
- Log.info { "download_cri_tools" }
- cmd = "curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/#{CRI_VERSION}/crictl-#{CRI_VERSION}-linux-amd64.tar.gz --output #{TarClient::TAR_BIN_DIR}/crictl-#{CRI_VERSION}-linux-amd64.tar.gz"
- stdout = IO::Memory.new
- Process.run(cmd, shell: true, output: stdout, error: stdout)
-
- cmd = "curl -L https://github.com/containerd/containerd/releases/download/v#{CTR_VERSION}/containerd-#{CTR_VERSION}-linux-amd64.tar.gz --output #{TarClient::TAR_BIN_DIR}/containerd-#{CTR_VERSION}-linux-amd64.tar.gz"
- stdout = IO::Memory.new
- Process.run(cmd, shell: true, output: stdout, error: stdout)
- end
-
- def self.untar_cri_tools
- TarClient.untar("#{TarClient::TAR_BIN_DIR}/crictl-#{CRI_VERSION}-linux-amd64.tar.gz", TarClient::TAR_BIN_DIR)
- TarClient.untar("#{TarClient::TAR_BIN_DIR}/containerd-#{CTR_VERSION}-linux-amd64.tar.gz", TarClient::TAR_TMP_BASE)
- end
-
- def self.pod_images(pods)
- # todo change into a reduce, loop through all containers and append image
- # into final array of images
- pods.map do |pod|
- containers = pod.dig("spec","containers").as_a
- #TODO make this work with multiple containers
- # Gets first image for every pod
- image = containers[0]? && containers[0].dig("image")
- end
- end
-
- def self.install_cri_binaries(cri_tool_pods)
- # AirGap.download_cri_tools()
- AirGap.untar_cri_tools()
- cri_tool_pods.map do |pod|
- KubectlClient.cp("#{TarClient::TAR_BIN_DIR}/crictl #{pod.dig?("metadata", "name")}:/usr/local/bin/crictl")
- KubectlClient.cp("#{TarClient::TAR_BIN_DIR}/ctr #{pod.dig?("metadata", "name")}:/usr/local/bin/ctr")
- end
- end
-
- def self.check_sh(pod_name, namespace="default")
- # --namespace=${POD[1]}
- sh = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /bin/sh > /dev/null")
- sh[:status].success?
- end
-
- def self.check_tar(pod_name, namespace="default", pod=true)
- if pod
- bin_tar = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /bin/tar > /dev/null")
- usr_bin_tar = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /usr/bin/tar > /dev/null")
- usr_local_bin_tar = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /usr/local/bin/tar > /dev/null")
- else
- bin_tar = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /tmp/bin/tar > /dev/null")
- usr_bin_tar = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /tmp/usr/bin/tar > /dev/null")
- usr_local_bin_tar = KubectlClient.exec("--namespace=#{namespace} #{pod_name} -- cat /tmp/usr/local/bin/tar > /dev/null")
- end
- if pod
- (bin_tar[:status].success? && "/bin/tar") || (usr_bin_tar.[:status].success? && "/usr/bin/tar") || (usr_local_bin_tar[:status].success? && "/usr/local/bin/tar")
- else
- (bin_tar[:status].success? && "/tmp/bin/tar") || (usr_bin_tar.[:status].success? && "/tmp/usr/bin/tar") || (usr_local_bin_tar[:status].success? && "/tmp/usr/local/bin/tar")
- end
- end
-
-
- # Makes a copy of an image that is already available on the cluster either as:
- # 1. an image, with shell access, that we have determined to already exist
- # ... or
- # 2. an image (cri-tools) that we have installed into the local docker registry using docker push
- # TODO make this work with runtimes other than containerd
- # TODO make a tool that cleans up the cri images
- def self.create_pod_by_image(image, name="cri-tools")
- template = CriToolsTemplate.new(name, image.to_s).to_s
- File.write("#{name}-manifest.yml", template)
- KubectlClient::Apply.file("#{name}-manifest.yml")
- LOGGING.info KubectlClient::Get.resource_wait_for_install("DaemonSet", name)
- end
-
- def self.pods_with_tar() : KubectlClient::K8sManifestList
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list).select do |pod|
- pod_name = pod.dig?("metadata", "name")
- namespace = pod.dig?("metadata", "namespace")
- if check_sh(pod_name, namespace) && check_tar(pod_name, namespace, pod=true)
- Log.debug { "Found tar and sh Pod: #{pod_name}" }
- true
- else
- false
- end
- end
- end
-
- def self.pods_with_sh() : KubectlClient::K8sManifestList
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list).select do |pod|
- pod_name = pod.dig?("metadata", "name")
- namespace = pod.dig?("metadata", "namespace")
- if check_sh(pod_name, namespace)
- Log.debug { "Found sh Pod: #{pod_name}" }
- true
- else
- false
- end
- end
- end
-
-
- def self.image_pull_policy_config_file?(install_method, config_src, release_name)
- Log.info { "image_pull_policy_config_file" }
- yml = [] of Array(YAML::Any)
- case install_method
- when Helm::InstallMethod::ManifestDirectory
- file_list = Helm::Manifest.manifest_file_list(config_src, silent=false)
- yml = Helm::Manifest.manifest_ymls_from_file_list(file_list)
- when Helm::InstallMethod::HelmChart, Helm::InstallMethod::HelmDirectory
- Helm.template(release_name, config_src, output_file="cnfs/temp_template.yml")
- yml = Helm::Manifest.parse_manifest_as_ymls(template_file_name="cnfs/temp_template.yml")
- else
- raise "config source error: #{install_method}"
- end
- container_image_pull_policy?(yml)
- end
-
- def self.container_image_pull_policy?(yml : Array(YAML::Any))
- Log.info { "container_image_pull_policy" }
- containers = yml.map { |y|
- mc = Helm::Manifest.manifest_containers(y)
- mc.as_a? if mc
- }.flatten.compact
- Log.debug { "containers : #{containers}" }
- found_all = true
- containers.flatten.map do |x|
- Log.debug { "container x: #{x}" }
- ipp = x.dig?("imagePullPolicy")
- image = x.dig?("image")
- Log.debug { "ipp: #{ipp}" }
- Log.debug { "image: #{image.as_s}" if image }
- parsed_image = DockerClient.parse_image(image.as_s) if image
- Log.debug { "parsed_image: #{parsed_image}" }
- # if there is no image pull policy, any image that does not have a tag will
- # force a call out to the default image registry
- if ipp == nil && (parsed_image && parsed_image["tag"] == "latest")
- Log.info { "ipp or tag not found with ipp: #{ipp} and parsed_image: #{parsed_image}" }
- found_all = false
- end
- end
- Log.info { "found_all: #{found_all}" }
- found_all
- end
-
-
- def self.image_pull_policy(file, output_file="")
- input_content = File.read(file)
- output_content = input_content.gsub(/(.*imagePullPolicy:)(.*.)/,"\\1 Never")
-
- # LOGGING.debug "pull policy found?: #{input_content =~ /(.*imagePullPolicy:)(.*)/}"
- # LOGGING.debug "output_content: #{output_content}"
- if output_file.empty?
- input_content = File.write(file, output_content)
- else
- input_content = File.write(output_file, output_content)
- end
- #
- #TODO find out why this doesn't work
- # LOGGING.debug "after conversion: #{File.read(file)}"
- end
-
- def self.tar_name_by_helm_chart(config_src : String)
- FileUtils.mkdir_p(TAR_REPOSITORY_DIR)
- Log.debug { "tar_name_by_helm_chart ls /tmp/repositories:" + "#{Dir.children("/tmp/repositories")}" }
- tar_dir = helm_tar_dir(config_src)
- tgz_files = Find.find(tar_dir, "*.tgz*")
- tar_files = Find.find(tar_dir, "*.tar*") + tgz_files
- tar_name = ""
- tar_name = tar_files[0] if !tar_files.empty?
- Log.info { "tar_name: #{tar_name}" }
- tar_name
- end
-
- def self.tar_info_by_config_src(config_src : String)
- FileUtils.mkdir_p(TAR_REPOSITORY_DIR)
- Log.debug { "tar_info_by_config_src ls /tmp/repositories:" + "#{Dir.children("/tmp/repositories")}" }
- # chaos-mesh/chaos-mesh --version 0.5.1
- repo = config_src.split(" ")[0]
- repo_dir = repo.gsub("/", "_")
- chart_name = repo.split("/")[-1]
- repo_path = "repositories/#{repo_dir}"
- tar_dir = "/tmp/#{repo_path}"
- tar_info = {repo: repo, repo_dir: repo_dir, chart_name: chart_name,
- repo_path: repo_path, tar_dir: tar_dir, tar_name: tar_name_by_helm_chart(config_src)}
- Log.info { "tar_info: #{tar_info}" }
- tar_info
- end
-
- def self.helm_tar_dir(config_src : String)
- FileUtils.mkdir_p(TAR_REPOSITORY_DIR)
- Log.debug { "helm_tar_dir ls /tmp/repositories:" + "#{Dir.children("/tmp/repositories")}" }
- # chaos-mesh/chaos-mesh --version 0.5.1
- repo = config_src.split(" ")[0]
- repo_dir = repo.gsub("/", "_")
- chart_name = repo.split("/")[-1]
- repo_path = "repositories/#{repo_dir}"
- tar_dir = "/tmp/#{repo_path}"
- Log.info { "helm_tar_dir: #{tar_dir}" }
- tar_dir
- end
-
- # todo separate cnf-test-suite cleanup from airgap generic cleanup
- # todo force process.run instead of backtick
- def self.tmp_cleanup
- Log.info { "cleaning up /tmp directories, binaries, and tar files" }
- paths = [
- "/tmp/repositories",
- "/tmp/images",
- "/tmp/bootstrap_images",
- "/tmp/download",
- "/tmp/manifests",
- "/tmp/bin",
- "/tmp/airgapped.tar.gz",
- "/tmp/chaos-daemon.tar",
- "/tmp/chaos-dashboard.tar",
- "/tmp/chaos-daemon.tar",
- "/tmp/chaos-mesh.tar",
- "/tmp/coredns_1.7.1.tar",
- "/tmp/crictl",
- "/tmp/kubectl.tar",
- "/tmp/litmus-operator.tar",
- "/tmp/litmus-runner.tar",
- "/tmp/pingcap-coredns.tar",
- "/tmp/prometheus.tar",
- "/tmp/sonobuoy-logs.tar",
- "/tmp/sonobuoy.tar"
- ]
- FileUtils.rm_rf(paths)
- end
-
-end
diff --git a/utils/airgap/cri-tools-template.yml.ecr b/utils/airgap/cri-tools-template.yml.ecr
deleted file mode 100644
index 1e1607621..000000000
--- a/utils/airgap/cri-tools-template.yml.ecr
+++ /dev/null
@@ -1,40 +0,0 @@
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
- name: <%= @name %>
-spec:
- selector:
- matchLabels:
- name: <%= @name %>
- template:
- metadata:
- labels:
- name: <%= @name %>
- spec:
- containers:
- - name: <%= @name %>
- image: '<%= @image %>'
- command: ["/bin/sh"]
- args: ["-c", "sleep infinity"]
- volumeMounts:
- - mountPath: /run/containerd/containerd.sock
- name: containerd-volume
- - mountPath: /tmp/usr/bin
- name: usrbin
- - mountPath: /tmp/usr/local/bin
- name: local
- - mountPath: /tmp/bin
- name: bin
- volumes:
- - name: containerd-volume
- hostPath:
- path: /var/run/containerd/containerd.sock
- - name: usrbin
- hostPath:
- path: /usr/bin/
- - name: local
- hostPath:
- path: /usr/local/bin/
- - name: bin
- hostPath:
- path: /bin/
\ No newline at end of file
diff --git a/utils/airgap/shard.yml b/utils/airgap/shard.yml
deleted file mode 100644
index 65e553eae..000000000
--- a/utils/airgap/shard.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: airgap
-version: 0.1.0
-
-authors:
- - W. Watson
- - Denver Williams
-
-targets:
- airgap:
- main: airgap.cr
-
-crystal: '>= 1.0.0'
-
-dependencies:
- halite:
- github: icyleaf/halite
- version: ~> 0.12.0
- totem:
- github: icyleaf/totem
- version: ~> 0.7.0
- # tar:
- # path: utils/tar
- tar:
- github: cnf-testsuite/tar
- # version: ~> 0.1.0
- branch: main
- helm:
- github: cnf-testsuite/helm
- version: ~> 1.0.1
- kubectl_client:
- github: cnf-testsuite/kubectl_client
- version: ~> 1.0.1
- docker_client:
- github: cnf-testsuite/docker_client
- version: ~> 1.0.0
- # find:
- # path: utils/find
- find:
- github: cnf-testsuite/find
- branch: main
-
-license: MIT
diff --git a/utils/airgap/spec/airgap_spec.cr b/utils/airgap/spec/airgap_spec.cr
deleted file mode 100644
index f220c9762..000000000
--- a/utils/airgap/spec/airgap_spec.cr
+++ /dev/null
@@ -1,179 +0,0 @@
-require "./spec_helper"
-require "colorize"
-require "kubectl_client"
-require "file_utils"
-
-describe "AirGap" do
-
- it "'image_pull_policy' should change all imagepull policy references to never", tags: ["airgap"] do
-
- AirGap.image_pull_policy("./spec/fixtures/litmus-operator-v1.13.2.yaml", "/tmp/imagetest.yml")
- (File.exists?("/tmp/imagetest.yml")).should be_true
- resp = File.read("/tmp/imagetest.yml")
- (resp).match(/imagePullPolicy: Always/).should be_nil
- (resp).match(/imagePullPolicy: Never/).should_not be_nil
- ensure
- `rm ./tmp/imagetest.yml`
- end
-
- it "'#AirGap.publish_tarball' should execute publish a tarball to a bootstrapped cluster", tags: ["airgap"] do
- AirGap.download_cri_tools
- AirGap.bootstrap_cluster()
- tarball_name = "./spec/fixtures/testimage.tar.gz"
- resp = AirGap.publish_tarball(tarball_name)
- resp[0][:output].to_s.match(/unpacking docker.io\/testimage\/testimage:test/).should_not be_nil
- end
-
- it "'.tar_helm_repo' should create a tar file from a helm repository that has options", tags: ["airgap"] do
- Helm.helm_repo_add("chaos-mesh", "https://charts.chaos-mesh.org")
- AirGap.tar_helm_repo("chaos-mesh/chaos-mesh --version 0.5.1", "/tmp/airgapped.tar")
- (File.exists?("/tmp/airgapped.tar")).should be_true
- resp = `tar -tvf /tmp/airgapped.tar`
- Log.info { "Tar Filelist: #{resp}" }
- (/repositories\/chaos-mesh_chaos-mesh/).should_not be_nil
- ensure
- `rm /tmp/airgapped.tar`
- end
-
- it "'.tar_helm_repo' should create a tar file from a helm repository", tags: ["airgap"] do
- AirGap.tar_helm_repo("stable/coredns", "/tmp/airgapped.tar")
- (File.exists?("/tmp/airgapped.tar")).should be_true
- resp = `tar -tvf /tmp/airgapped.tar`
- Log.info { "Tar Filelist: #{resp}" }
- (/repositories\/stable_coredns/).should_not be_nil
- ensure
- `rm /tmp/airgapped.tar`
- end
-
- it "'.tar_manifest' should create a tar file from a manifest", tags: ["airgap"] do
- # KubectlClient::Apply.file("https://litmuschaos.github.io/litmus/litmus-operator-v1.13.2.yaml")
- AirGap.tar_manifest("https://litmuschaos.github.io/litmus/litmus-operator-v1.13.2.yaml", "/tmp/airgapped.tar")
- (File.exists?("/tmp/airgapped.tar")).should be_true
- resp = `tar -tvf /tmp/airgapped.tar`
- Log.info { "Tar Filelist: #{resp}" }
- (/litmus-operator-v1.13.2.yaml/).should_not be_nil
- ensure
- `rm /tmp/airgapped.tar`
- end
-
- it "'#AirGap.check_tar' should determine if a pod has the tar binary on it", tags: ["airgap"] do
- pods = KubectlClient::Get.pods
- resp = AirGap.check_tar(pods.dig?("metadata", "name"))
- resp.should be_false
- end
-
- it "'#AirGap.check_tar' should determine if the host has the tar binary on it", tags: ["airgap"] do
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
- pods = KubectlClient::Get.pods_by_label(pods, "name", "cri-tools")
- resp = AirGap.check_tar(pods[0].dig?("metadata", "name"), pod=false)
- Log.debug { "Path to tar on the host filesystem: #{resp}" }
- resp.should_not be_nil
- end
-
- it "'#AirGap.check_sh' should determine if a pod has a shell on it", tags: ["airgap"] do
- pods = KubectlClient::Get.pods
- resp = AirGap.check_sh(pods.dig?("metadata", "name"))
- resp.should be_false
- end
-
- it "'#AirGap.pods_with_tar' should determine if there are any pods with a shell and tar on them", tags: ["airgap"] do
- #TODO Should install cri-tools or container with tar before running spec.
- resp = AirGap.pods_with_tar()
- if resp[0].dig?("metadata", "name")
- Log.debug { "Pods With Tar Found #{resp[0].dig?("metadata", "name")}" }
- end
- (resp[0].dig?("kind")).should eq "Pod"
- end
-
- it "'#AirGap.pods_with_sh' should determine if there are any pods with a shell on them", tags: ["airgap"] do
- resp = AirGap.pods_with_sh()
- (resp[0].dig?("kind")).should eq "Pod"
- end
-
- it "'#AirGap.pod_images' should retrieve all of the images for the pods with tar", tags: ["airgap"] do
- pods = AirGap.pods_with_tar()
- resp = AirGap.pod_images(pods)
- (resp[0]).should_not be_nil
- end
-
-
- it "'#AirGap.download_cri_tools' should download the cri tools", tags: ["airgap-tools"] do
- resp = AirGap.download_cri_tools()
- (File.exists?("#{TarClient::TAR_BIN_DIR}/crictl-#{AirGap::CRI_VERSION}-linux-amd64.tar.gz")).should be_true
- (File.exists?("#{TarClient::TAR_BIN_DIR}/containerd-#{AirGap::CTR_VERSION}-linux-amd64.tar.gz")).should be_true
- end
-
- it "'#AirGap.untar_cri_tools' should untar the cri tools", tags: ["airgap-tools"] do
- AirGap.download_cri_tools
- resp = AirGap.untar_cri_tools()
- (File.exists?("#{TarClient::TAR_BIN_DIR}/crictl")).should be_true
- (File.exists?("#{TarClient::TAR_BIN_DIR}/ctr")).should be_true
- end
-
- it "'#AirGap.create_pod_by_image' should install the cri pod in the cluster", tags: ["airgap-tools"] do
- pods = AirGap.pods_with_tar()
- if pods.empty?
- Log.info { `./cnf-testsuite cnf_setup cnf-config=./example-cnfs/envoy/cnf-testsuite.yml` }
- $?.success?.should be_true
- pods = AirGap.pods_with_tar()
- end
- images_with_tar = AirGap.pod_images(pods)
- image = images_with_tar[0]
- Log.info { "Image with TAR: #{image}" }
- resp = AirGap.create_pod_by_image(image)
- (resp).should be_true
- ensure
- KubectlClient::Delete.command("daemonset cri-tools")
- Log.info { `./cnf-testsuite cnf_cleanup cnf-config=./example-cnfs/envoy/cnf-testsuite.yml wait_count=0` }
- end
-
- it "'#AirGap.bootstrap_cluster' should install the cri tools in the cluster that has an image with tar avaliable on the node.", tags: ["airgap-tools"] do
- pods = AirGap.pods_with_tar()
- if pods.empty?
- Log.info { `./cnf-testsuite cnf_setup cnf-config=./example-cnfs/envoy/cnf-testsuite.yml` }
- $?.success?.should be_true
- end
- AirGap.bootstrap_cluster()
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
- pods = KubectlClient::Get.pods_by_label(pods, "name", "cri-tools")
- # Get the generated name of the cri-tools per node
- pods.map do |pod|
- pod_name = pod.dig?("metadata", "name")
- containers = pod.dig("spec","containers").as_a
- image = containers[0]? && containers[0].dig("image")
- Log.info { "CRI Pod Image: #{image}" }
- sh = KubectlClient.exec("#{pod_name} -- cat /usr/local/bin/crictl > /dev/null")
- sh[:status].success?
- sh = KubectlClient.exec("#{pod_name} -- cat /usr/local/bin/ctr > /dev/null")
- sh[:status].success?
- end
- ensure
- KubectlClient::Delete.command("daemonset cri-tools")
- Log.info { `./cnf-testsuite cnf_cleanup cnf-config=./example-cnfs/envoy/cnf-testsuite.yml wait_count=0` }
- end
-
-
- it "'#AirGap.bootstrap_cluster' should install the cri tools in the cluster that does not have tar in the images", tags: ["airgap-tools"] do
- KubectlClient::Delete.command("daemonset cri-tools")
- pods = AirGap.pods_with_tar()
- # Skip the test if tar is available outside of the cri tools
- if pods.empty?
- AirGap.bootstrap_cluster()
- pods = KubectlClient::Get.pods_by_nodes(KubectlClient::Get.schedulable_nodes_list)
- pods = KubectlClient::Get.pods_by_label(pods, "name", "cri-tools")
- # Get the generated name of the cri-tools per node
- pods.map do |pod|
- pod_name = pod.dig?("metadata", "name")
- sh = KubectlClient.exec("#{pod_name} -- cat /usr/local/bin/crictl > /dev/null")
- sh[:status].success?
- sh = KubectlClient.exec("#{pod_name} -- cat /usr/local/bin/ctr > /dev/null")
- sh[:status].success?
- end
- end
- ensure
- KubectlClient::Delete.command("daemonset cri-tools")
- end
-end
-
-
-
diff --git a/utils/airgap/spec/spec_helper.cr b/utils/airgap/spec/spec_helper.cr
deleted file mode 100644
index 2f4049175..000000000
--- a/utils/airgap/spec/spec_helper.cr
+++ /dev/null
@@ -1,8 +0,0 @@
-require "spec"
-require "colorize"
-require "../airgap.cr"
-require "helm"
-
-ENV["CRYSTAL_ENV"] = "TEST"
-
-