From 21f7333cbd55c86950cf8a16757f56debe9536cd Mon Sep 17 00:00:00 2001
From: Sebastian Sch
Date: Sun, 22 Oct 2023 17:00:20 +0300
Subject: [PATCH] Support number of workers for the virtual cluster

Also add a make target to redeploy the containers on the cluster for an
easy development cycle.

Signed-off-by: Sebastian Sch
---
 Makefile                                    |  3 +
 doc/testing-virtual-machine.md              | 12 +++-
 hack/run-e2e-conformance-virtual-cluster.sh | 35 +++++++---
 hack/run-e2e-conformance-virtual-ocp.sh     | 22 +++++--
 hack/virtual-cluster-redeploy.sh            | 72 +++++++++++++++++++++
 5 files changed, 128 insertions(+), 16 deletions(-)
 create mode 100644 hack/virtual-cluster-redeploy.sh

diff --git a/Makefile b/Makefile
index a523236e3..341e20e2c 100644
--- a/Makefile
+++ b/Makefile
@@ -197,6 +197,9 @@ test-e2e-conformance-virtual-ocp-cluster-ci:
 test-e2e-conformance-virtual-ocp-cluster:
 	SKIP_DELETE=TRUE ./hack/run-e2e-conformance-virtual-ocp.sh
 
+redeploy-operator-virtual-cluster:
+	./hack/virtual-cluster-redeploy.sh
+
 test-e2e-validation-only:
 	SUITE=./test/validation ./hack/run-e2e-conformance.sh
 
diff --git a/doc/testing-virtual-machine.md b/doc/testing-virtual-machine.md
index 31fce2457..f8b354243 100644
--- a/doc/testing-virtual-machine.md
+++ b/doc/testing-virtual-machine.md
@@ -35,6 +35,8 @@ WantedBy=network-online.target
 * make
 * go
 
+*Note:* For OpenShift you will also need a pull secret, which you can download from the [Red Hat Console](https://console.redhat.com/).
+
 ## Deploy the cluster
 
 use the deployment [script](../hack/run-e2e-conformance-virtual-cluster.sh), this will deploy a k8s cluster
@@ -55,4 +57,12 @@ To use the cluster after the deployment you need to export the kubeconfig
 
 ```
 export KUBECONFIG=$HOME/.kcli/clusters/virtual/auth/kubeconfig
-```
\ No newline at end of file
+```
+
+It's also possible to configure the number of workers using `NUM_OF_WORKERS`.
+
+*NOTE:* The minimum number of workers is 3 for OCP and 2 for k8s.
+
+To work on the operator you can change the code and then rebuild and redeploy it using
+`make redeploy-operator-virtual-cluster`.
+You need to specify the cluster type: `CLUSTER_TYPE=openshift` for OCP or `CLUSTER_TYPE=kubernetes` for k8s.
diff --git a/hack/run-e2e-conformance-virtual-cluster.sh b/hack/run-e2e-conformance-virtual-cluster.sh
index 8bba27e2f..ac4d992dc 100755
--- a/hack/run-e2e-conformance-virtual-cluster.sh
+++ b/hack/run-e2e-conformance-virtual-cluster.sh
@@ -11,6 +11,14 @@ HOME="/root"
 here="$(dirname "$(readlink --canonicalize "${BASH_SOURCE[0]}")")"
 root="$(readlink --canonicalize "$here/..")"
 
+NUM_OF_WORKERS=${NUM_OF_WORKERS:-2}
+total_number_of_nodes=$((1 + NUM_OF_WORKERS))
+
+if [ "$NUM_OF_WORKERS" -lt 2 ]; then
+    echo "Min number of workers is 2"
+    exit 1
+fi
+
 check_requirements() {
   for cmd in kcli virsh virt-edit podman make go; do
     if ! command -v "$cmd" &> /dev/null; then
@@ -49,7 +57,7 @@ api_ip: $api_ip
 virtual_router_id: $virtual_router_id
 domain: $domain_name
 ctlplanes: 1
-workers: 2
+workers: $NUM_OF_WORKERS
 ingress: false
 machine: q35
 engine: crio
@@ -92,7 +100,7 @@ sleep_time=10
 until $ready || [ $ATTEMPTS -eq $MAX_ATTEMPTS ]
 do
   echo "waiting for cluster to be ready"
-  if [ `kubectl get node | grep Ready | wc -l` == 3 ]; then
+  if [ `kubectl get node | grep Ready | wc -l` == $total_number_of_nodes ]; then
     echo "cluster is ready"
     ready=true
   else
@@ -108,13 +116,21 @@
 if ! $ready; then
     exit 1
 fi
 
+function update_worker_labels() {
 echo "## label cluster workers as sriov capable"
-kubectl label node $cluster_name-worker-0.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
-kubectl label node $cluster_name-worker-1.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
+for ((num=0; num<NUM_OF_WORKERS; num++))
+do
+    kubectl label node $cluster_name-worker-$num.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
+done
+}
diff --git a/hack/run-e2e-conformance-virtual-ocp.sh b/hack/run-e2e-conformance-virtual-ocp.sh
--- a/hack/run-e2e-conformance-virtual-ocp.sh
+++ b/hack/run-e2e-conformance-virtual-ocp.sh
 cat << EOF > ./${cluster_name}-plan.yaml
-tag: 4.14.0-rc.6
+tag: $OCP_VERSION
 ctlplane_memory: 32768
 worker_memory: 8192
 pool: default
@@ -51,7 +60,7 @@ api_ip: $api_ip
 virtual_router_id: $virtual_router_id
 domain: $domain_name
 ctlplanes: 1
-workers: 3
+workers: $NUM_OF_WORKERS
 machine: q35
 network_type: OVNKubernetes
 pull_secret: /root/openshift_pull.json
@@ -97,7 +106,7 @@ sleep_time=10
 until $ready || [ $ATTEMPTS -eq $MAX_ATTEMPTS ]
 do
   echo "waiting for cluster to be ready"
-  if [ `kubectl get node | grep Ready | wc -l` == 4 ]; then
+  if [ `kubectl get node | grep Ready | wc -l` == $total_number_of_nodes ]; then
     echo "cluster is ready"
     ready=true
   else
@@ -114,9 +123,10 @@ if ! $ready; then
 fi
 
 echo "## label cluster workers as sriov capable"
-kubectl label node $cluster_name-worker-0.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
-kubectl label node $cluster_name-worker-1.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
-kubectl label node $cluster_name-worker-2.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
+for ((num=0; num<NUM_OF_WORKERS; num++))
+do
+    kubectl label node $cluster_name-worker-$num.$domain_name feature.node.kubernetes.io/network-sriov.capable=true --overwrite
+done
diff --git a/hack/virtual-cluster-redeploy.sh b/hack/virtual-cluster-redeploy.sh
new file mode 100644
--- /dev/null
+++ b/hack/virtual-cluster-redeploy.sh
+    > registry-login.conf
+
+    internal_registry="image-registry.openshift-image-registry.svc:5000"
+    pass=$( jq .\"$internal_registry\".password registry-login.conf )
+
+    registry="default-route-openshift-image-registry.apps.${cluster_name}.${domain_name}"
+    podman login -u serviceaccount -p ${pass:1:-1} $registry --tls-verify=false
+
+    export SRIOV_NETWORK_OPERATOR_IMAGE="$registry/$NAMESPACE/sriov-network-operator:latest"
+    export SRIOV_NETWORK_CONFIG_DAEMON_IMAGE="$registry/$NAMESPACE/sriov-network-config-daemon:latest"
+    export SRIOV_NETWORK_WEBHOOK_IMAGE="$registry/$NAMESPACE/sriov-network-operator-webhook:latest"
+else
+    echo "K8S"
+    cluster_name=${CLUSTER_NAME:-virtual}
+    export NAMESPACE="sriov-network-operator"
+    export KUBECONFIG=/root/.kcli/clusters/$cluster_name/auth/kubeconfig
+
+    controller_ip=`kubectl get node -o wide | grep ctlp | awk '{print $6}'`
+
+    export SRIOV_NETWORK_OPERATOR_IMAGE="$controller_ip:5000/sriov-network-operator:latest"
+    export SRIOV_NETWORK_CONFIG_DAEMON_IMAGE="$controller_ip:5000/sriov-network-config-daemon:latest"
+    export SRIOV_NETWORK_WEBHOOK_IMAGE="$controller_ip:5000/sriov-network-operator-webhook:latest"
+fi
+
+export ENABLE_ADMISSION_CONTROLLER=true
+export SKIP_VAR_SET=""
+export OPERATOR_NAMESPACE=$NAMESPACE
+export OPERATOR_EXEC=kubectl
+export DEV_MODE=TRUE
+export CLUSTER_HAS_EMULATED_PF=TRUE
+
+echo "## build operator image"
+podman build -t "${SRIOV_NETWORK_OPERATOR_IMAGE}" -f "${root}/Dockerfile" "${root}"
+
+echo "## build daemon image"
+podman build -t "${SRIOV_NETWORK_CONFIG_DAEMON_IMAGE}" -f "${root}/Dockerfile.sriov-network-config-daemon" "${root}"
+
+echo "## build webhook image"
+podman build -t "${SRIOV_NETWORK_WEBHOOK_IMAGE}" -f "${root}/Dockerfile.webhook" "${root}"
+
+podman push --tls-verify=false "${SRIOV_NETWORK_OPERATOR_IMAGE}"
+podman push --tls-verify=false "${SRIOV_NETWORK_CONFIG_DAEMON_IMAGE}"
+podman push --tls-verify=false "${SRIOV_NETWORK_WEBHOOK_IMAGE}"
+
+if [ $CLUSTER_TYPE == "openshift" ]; then
+    export SRIOV_NETWORK_OPERATOR_IMAGE="image-registry.openshift-image-registry.svc:5000/$NAMESPACE/sriov-network-operator:latest"
+    export SRIOV_NETWORK_CONFIG_DAEMON_IMAGE="image-registry.openshift-image-registry.svc:5000/$NAMESPACE/sriov-network-config-daemon:latest"
+    export SRIOV_NETWORK_WEBHOOK_IMAGE="image-registry.openshift-image-registry.svc:5000/$NAMESPACE/sriov-network-operator-webhook:latest"
+fi
+
+echo "## deploying SRIOV Network Operator"
+hack/deploy-setup.sh $NAMESPACE
+
+kubectl -n ${NAMESPACE} delete po --all
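
For reference, a minimal usage sketch of the worker-count knob this patch introduces, assuming the scripts are run from the repository root and kcli/libvirt are already set up as described in doc/testing-virtual-machine.md; the worker count of 3 and the `kubectl get nodes` check are only illustrative:

```
# Deploy the virtual k8s cluster with a custom number of workers
# (the script enforces a minimum of 2 for k8s; the OCP variant needs at least 3).
# SKIP_DELETE=TRUE keeps the cluster around after the run, as in the existing Makefile targets.
SKIP_DELETE=TRUE NUM_OF_WORKERS=3 ./hack/run-e2e-conformance-virtual-cluster.sh

# Use the cluster after deployment (kcli writes the kubeconfig under ~/.kcli/clusters/<cluster>/auth).
export KUBECONFIG=$HOME/.kcli/clusters/virtual/auth/kubeconfig
kubectl get nodes   # expect 1 control-plane node plus NUM_OF_WORKERS workers in Ready state
```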
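And a sketch of the development loop enabled by the new `redeploy-operator-virtual-cluster` target: `CLUSTER_TYPE` selects which branch of hack/virtual-cluster-redeploy.sh runs, and the script rebuilds the operator, config-daemon and webhook images, pushes them to the cluster registry, re-runs hack/deploy-setup.sh and deletes the pods in the operator namespace so they restart with the new images:

```
# Against the virtual k8s cluster (images go to the registry on the control-plane node).
CLUSTER_TYPE=kubernetes make redeploy-operator-virtual-cluster

# Against the virtual OCP cluster (images go through the in-cluster OpenShift image registry).
CLUSTER_TYPE=openshift make redeploy-operator-virtual-cluster
```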