From ef1d8c7d6f64187a06631c7e5242af91f540b8d1 Mon Sep 17 00:00:00 2001 From: Veerabhadrarao Damisetti Date: Wed, 18 Dec 2024 15:19:28 +0530 Subject: [PATCH] Enhancement: Updated deletion procedure for HCP (#362) - Updated deletion procedure for HCP - Updated MCE creation role to take catalogsource name from user input Signed-off-by: DAMISETTI-VEERABHADRARAO --- docs/set-variables-group-vars.md | 1 + .../default/group_vars/hcp.yaml.template | 1 + roles/boot_LPAR_hcp/tasks/main.yaml | 2 +- roles/boot_zvm_nodes/tasks/main.yaml | 2 +- .../tasks/main.yaml | 57 ++++++++++++------- .../tasks/main.yaml | 30 +++++++--- .../templates/Subscription.yaml.j2 | 2 +- 7 files changed, 63 insertions(+), 32 deletions(-) diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md index 1e342d0c..16c551b6 100644 --- a/docs/set-variables-group-vars.md +++ b/docs/set-variables-group-vars.md @@ -287,6 +287,7 @@ **hcp.pkgs** | list of packages for different hosts | **hcp.mce.version** | version for multicluster-engine Operator | 2.4 **hcp.mce.instance_name** | name of the MultiClusterEngine instance | engine +**hcp.mce.catalogsource_name** | Name of the catalogsource for operatorhub | redhat-operators **hcp.mce.delete** | true or false - deletes mce and related resources while running deletion playbook | true **hcp.asc.url_for_ocp_release_file** | Add URL for OCP release.txt File | https://... 
..../release.txt **hcp.asc.db_volume_size** | DatabaseStorage Volume Size | 10Gi diff --git a/inventories/default/group_vars/hcp.yaml.template b/inventories/default/group_vars/hcp.yaml.template index 5998957f..38a1939c 100644 --- a/inventories/default/group_vars/hcp.yaml.template +++ b/inventories/default/group_vars/hcp.yaml.template @@ -17,6 +17,7 @@ hcp: version: instance_name: engine delete: false + catalogsource_name: redhat-operators # AgentServiceConfig Parameters diff --git a/roles/boot_LPAR_hcp/tasks/main.yaml b/roles/boot_LPAR_hcp/tasks/main.yaml index fef88915..cf433604 100644 --- a/roles/boot_LPAR_hcp/tasks/main.yaml +++ b/roles/boot_LPAR_hcp/tasks/main.yaml @@ -12,7 +12,7 @@ - name: Booting LPAR shell: | - python /root/ansible_workdir/boot_lpar.py \ + python3 /root/ansible_workdir/boot_lpar.py \ --cpcname "{{ hcp.data_plane.lpar.nodes[item].name.split('lp')[0] }}" \ --lparname "{{ hcp.data_plane.lpar.nodes[item].name }}" \ --hmchost "{{ hcp.data_plane.lpar.nodes[item].hmc_host }}" \ diff --git a/roles/boot_zvm_nodes/tasks/main.yaml b/roles/boot_zvm_nodes/tasks/main.yaml index 80c490b4..448f5849 100644 --- a/roles/boot_zvm_nodes/tasks/main.yaml +++ b/roles/boot_zvm_nodes/tasks/main.yaml @@ -12,7 +12,7 @@ - name: Booting zvm node shell: | - python /root/ansible_workdir/boot_nodes.py \ + python3 /root/ansible_workdir/boot_nodes.py \ --zvmname "{{ zvm.nodes[item].name }}" \ --zvmhost "{{ zvm.nodes[item].host }}" \ --zvmuser "{{ zvm.nodes[item].user }}" \ diff --git a/roles/delete_resources_bastion_hcp/tasks/main.yaml b/roles/delete_resources_bastion_hcp/tasks/main.yaml index 7599f192..8ec54cec 100644 --- a/roles/delete_resources_bastion_hcp/tasks/main.yaml +++ b/roles/delete_resources_bastion_hcp/tasks/main.yaml @@ -1,13 +1,23 @@ --- - name: Login to Management Cluster - command: oc login {{ api_server }} -u {{ user_name }} -p {{ password }} --insecure-skip-tls-verify=true + ansible.builtin.command: oc login {{ api_server }} -u {{ user_name }} -p {{ 
password }} --insecure-skip-tls-verify=true + +- name: Checking Nodepool + ansible.builtin.command: oc get np -n {{ hcp.control_plane.clusters_namespace }} + register: np_check - name: Scale in Nodepool - command: oc -n {{ hcp.control_plane.clusters_namespace }} scale nodepool {{ hcp.control_plane.hosted_cluster_name }} --replicas 0 + ansible.builtin.command: oc -n {{ hcp.control_plane.clusters_namespace }} scale nodepool {{ hcp.control_plane.hosted_cluster_name }} --replicas 0 + when: "'{{ hcp.control_plane.hosted_cluster_name }}' in np_check.stdout" + +- name: Checking Hosted Cluster + ansible.builtin.command: oc get hc -n {{ hcp.control_plane.clusters_namespace }} + register: hc_check - name: Create Kubeconfig for Hosted Cluster - shell: hcp create kubeconfig --namespace {{ hcp.control_plane.clusters_namespace }} --name {{ hcp.control_plane.hosted_cluster_name }} > /root/ansible_workdir/hcp-kubeconfig + ansible.builtin.shell: hcp create kubeconfig --namespace {{ hcp.control_plane.clusters_namespace }} --name {{ hcp.control_plane.hosted_cluster_name }} > /root/ansible_workdir/hcp-kubeconfig + when: "'{{ hcp.control_plane.hosted_cluster_name }}' in hc_check.stdout" - block: - name: Wait for Worker Nodes to Detach @@ -21,28 +31,28 @@ delay: 15 rescue: - name: Getting basedomain - shell: oc get hc {{ hcp.control_plane.hosted_cluster_name }} -n {{ hcp.control_plane.clusters_namespace }} -o json | jq -r '.spec.dns.baseDomain' + ansible.builtin.shell: oc get hc {{ hcp.control_plane.hosted_cluster_name }} -n {{ hcp.control_plane.clusters_namespace }} -o json | jq -r '.spec.dns.baseDomain' register: base_domain - name: Deleting the compute nodes manually - command: oc delete no compute-{{item}}.{{hcp.control_plane.hosted_cluster_name }}.{{ base_domain.stdout }} --kubeconfig /root/ansible_workdir/hcp-kubeconfig + ansible.builtin.command: oc delete no compute-{{item}}.{{hcp.control_plane.hosted_cluster_name }}.{{ base_domain.stdout }} --kubeconfig 
/root/ansible_workdir/hcp-kubeconfig loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" - name: Get machine names - command: oc get machine.cluster.x-k8s.io -n {{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }} --no-headers + ansible.builtin.command: oc get machine.cluster.x-k8s.io -n {{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }} --no-headers register: machines_info - name: Create List for machines - set_fact: + ansible.builtin.set_fact: machines: [] - name: Get the List of machines - set_fact: + ansible.builtin.set_fact: machines: "{{ machines + [machines_info.stdout.split('\n')[item].split(' ')[0]] }}" loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" - name: Patch the machines to remove finalizers - shell: oc patch machine.cluster.x-k8s.io "{{ machines[item] }}" -n "{{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }}" -p '{"metadata":{"finalizers":null}}' --type=merge + ansible.builtin.shell: oc patch machine.cluster.x-k8s.io "{{ machines[item] }}" -n "{{ hcp.control_plane.clusters_namespace }}-{{ hcp.control_plane.hosted_cluster_name }}" -p '{"metadata":{"finalizers":null}}' --type=merge loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" - name: Wait for Agentmachines to delete @@ -66,24 +76,24 @@ delay: 10 - name: Get agent names - command: oc get agents -n {{ hcp.control_plane.hosted_cluster_name }}-agents --no-headers + ansible.builtin.command: oc get agents -n {{ hcp.control_plane.hosted_cluster_name }}-agents --no-headers register: agents_info - name: Create List for agents - set_fact: + ansible.builtin.set_fact: agents: [] - name: Get a List of agents - set_fact: + ansible.builtin.set_fact: agents: "{{ agents + [agents_info.stdout.split('\n')[item].split(' ')[0]] }}" - loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" + loop: "{{ range(agents_info.stdout_lines | length ) | list }}" - name: Delete Agents 
- command: oc delete agent {{ agents[item] }} -n {{ hcp.control_plane.hosted_cluster_name }}-agents - loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" + ansible.builtin.command: oc delete agent {{ agents[item] }} -n {{ hcp.control_plane.hosted_cluster_name }}-agents + loop: "{{ range(agents_info.stdout_lines | length ) | list }}" - name: Remove workdir - file: + ansible.builtin.file: path: /root/ansible_workdir state: absent @@ -95,8 +105,13 @@ name: "{{ hcp.control_plane.hosted_cluster_name }}" namespace: "{{ hcp.control_plane.hosted_cluster_name }}-agents" +- name: Checking Hosted Cluster + ansible.builtin.command: oc get hc -n {{ hcp.control_plane.clusters_namespace }} + register: hc_check + - name: Destroy Hosted Control Plane - command: hcp destroy cluster agent --name {{ hcp.control_plane.hosted_cluster_name }} --namespace {{ hcp.control_plane.clusters_namespace }} + ansible.builtin.command: hcp destroy cluster agent --name {{ hcp.control_plane.hosted_cluster_name }} --namespace {{ hcp.control_plane.clusters_namespace }} + when: "'{{ hcp.control_plane.hosted_cluster_name }}' in hc_check.stdout" - name: Delete Clusters Namespace k8s: @@ -106,7 +121,7 @@ state: absent - name: Wait for managed cluster resource to be deleted - shell: oc get managedcluster "{{ hcp.control_plane.hosted_cluster_name }}" + ansible.builtin.shell: oc get managedcluster "{{ hcp.control_plane.hosted_cluster_name }}" register: managedcluster until: managedcluster.rc != 0 retries: 50 @@ -118,11 +133,11 @@ when: managedcluster.rc == 0 and managedcluster.attempts >= 40 - name: Disable local-cluster component in MCE - command: oc patch mce {{ hcp.mce.instance_name }} -p '{"spec":{"overrides":{"components":[{"name":"local-cluster","enabled":false}]}}}' --type merge + ansible.builtin.command: oc patch mce {{ hcp.mce.instance_name }} -p '{"spec":{"overrides":{"components":[{"name":"local-cluster","enabled":false}]}}}' --type merge when: hcp.mce.delete == true - name: Wait for 
local-cluster components to be deleted - shell: oc get ns local-cluster + ansible.builtin.shell: oc get ns local-cluster register: localcluster until: localcluster.rc != 0 retries: 40 @@ -213,4 +228,4 @@ file: path: /var/www/html/rootfs.img state: absent - when: hcp.compute_node_type | lower == 'zvm' + when: hcp.compute_node_type | lower == 'zvm' \ No newline at end of file diff --git a/roles/delete_resources_kvm_host_hcp/tasks/main.yaml b/roles/delete_resources_kvm_host_hcp/tasks/main.yaml index 029b9f3c..c32cb8a0 100644 --- a/roles/delete_resources_kvm_host_hcp/tasks/main.yaml +++ b/roles/delete_resources_kvm_host_hcp/tasks/main.yaml @@ -1,36 +1,50 @@ --- +- name: List all VMs + community.libvirt.virt: + command: list_vms + register: all_vms + +- name: List only running VMs + community.libvirt.virt: + command: list_vms + state: running + register: running_vms - name: Destroy Agent VMs - command: virsh destroy {{ hcp.control_plane.hosted_cluster_name }}-agent-{{ item }} + ansible.builtin.command: virsh destroy {{ hcp.control_plane.hosted_cluster_name }}-agent-{{ item }} loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" + when: hcp.control_plane.hosted_cluster_name + '-agent-' + item|string in running_vms.list_vms - name: Undefine the Agents - command: virsh undefine {{ hcp.control_plane.hosted_cluster_name }}-agent-{{ item }} --remove-all-storage + ansible.builtin.command: virsh undefine {{ hcp.control_plane.hosted_cluster_name }}-agent-{{ item }} --remove-all-storage loop: "{{ range(hcp.data_plane.compute_count|int) | list }}" + when: hcp.control_plane.hosted_cluster_name + '-agent-' + item|string in all_vms.list_vms - name: Delete the initrd.img - file: + ansible.builtin.file: path: /var/lib/libvirt/images/pxeboot/initrd.img state: absent when: ( hcp.data_plane.kvm.boot_method | lower != 'iso' and hcp.compute_node_type | lower == 'kvm' ) or hcp.compute_node_type | lower != 'kvm' - name: Delete the kernel.img - file: + ansible.builtin.file: path: 
/var/lib/libvirt/images/pxeboot/kernel.img state: absent when: ( hcp.data_plane.kvm.boot_method | lower != 'iso' and hcp.compute_node_type | lower == 'kvm' ) or hcp.compute_node_type | lower != 'kvm' - name: Delete ISO - file: + ansible.builtin.file: path: /var/lib/libvirt/images/pxeboot/image.iso state: absent when: hcp.data_plane.kvm.boot_method | lower == 'iso' and hcp.compute_node_type | lower == 'kvm' - name: Destroy Bastion - command: virsh destroy {{ hcp.control_plane.hosted_cluster_name }}-bastion + ansible.builtin.command: virsh destroy {{ hcp.control_plane.hosted_cluster_name }}-bastion + when: hcp.control_plane.hosted_cluster_name + '-bastion' in running_vms.list_vms - name: Undefine Bastion - command: virsh undefine {{ hcp.control_plane.hosted_cluster_name }}-bastion --remove-all-storage + ansible.builtin.command: virsh undefine {{ hcp.control_plane.hosted_cluster_name }}-bastion --remove-all-storage + when: hcp.control_plane.hosted_cluster_name + '-bastion' in all_vms.list_vms - name: Stop the storage pool community.libvirt.virt_pool: @@ -48,4 +62,4 @@ ansible.builtin.file: path: "{{ hcp.data_plane.kvm.storage.qcow.pool_path }}{{ hcp.control_plane.hosted_cluster_name }}" state: absent - when: hcp.data_plane.kvm.storage.type | lower != 'dasd' and hcp.compute_node_type | lower == 'kvm' + when: hcp.data_plane.kvm.storage.type | lower != 'dasd' and hcp.compute_node_type | lower == 'kvm' \ No newline at end of file diff --git a/roles/install_mce_operator/templates/Subscription.yaml.j2 b/roles/install_mce_operator/templates/Subscription.yaml.j2 index f85b823b..2a5ec6f0 100644 --- a/roles/install_mce_operator/templates/Subscription.yaml.j2 +++ b/roles/install_mce_operator/templates/Subscription.yaml.j2 @@ -5,7 +5,7 @@ metadata: namespace: "{{ hcp.asc.mce_namespace }}" spec: sourceNamespace: openshift-marketplace - source: redhat-operators + source: "{{ hcp.mce.catalogsource_name }}" channel: stable-{{ hcp.mce.version }} 
installPlanApproval: Automatic name: multicluster-engine