From bb228443fd66d3c619bd68a29cabad0093e3b1c6 Mon Sep 17 00:00:00 2001 From: Alvaro Saurin Date: Wed, 20 Feb 2019 17:44:09 +0100 Subject: [PATCH] Synchronize everything before starting an orchestration. Replace all the `mine.get` calls by the more compact `get_with_expr` function. bsc#1124784 Signed-off-by: Alvaro Saurin --- salt/_modules/caasp_orch.py | 16 +++++++++++++++ salt/orch/etcd-migrate.sls | 3 +++ salt/orch/force-removal.sls | 5 ++++- salt/orch/kubernetes.sls | 9 ++++++--- salt/orch/prepare-product-migration.sls | 3 +++ salt/orch/removal.sls | 9 ++++++--- salt/orch/update-etc-hosts.sls | 7 ++++++- salt/orch/update.sls | 26 ++++++++++++++----------- 8 files changed, 59 insertions(+), 19 deletions(-) create mode 100644 salt/_modules/caasp_orch.py diff --git a/salt/_modules/caasp_orch.py b/salt/_modules/caasp_orch.py new file mode 100644 index 000000000..56e6b8a75 --- /dev/null +++ b/salt/_modules/caasp_orch.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import + + +def __virtual__(): + return "caasp_orch" + + +def sync_all(): + ''' + Synchronize everything before starting a new orchestration + ''' + __utils__['caasp_log.debug']('orch: refreshing all') + __salt__['saltutil.sync_all'](refresh=True) + + __utils__['caasp_log.debug']('orch: synchronizing the mine') + __salt__['saltutil.runner']('mine.update', tgt='*', clear=True) diff --git a/salt/orch/etcd-migrate.sls b/salt/orch/etcd-migrate.sls index 8725dc56e..2fb68f0d1 100644 --- a/salt/orch/etcd-migrate.sls +++ b/salt/orch/etcd-migrate.sls @@ -1,3 +1,6 @@ +{#- Make sure we start with an updated mine #} +{%- set _ = salt.caasp_orch.sync_all() %} + # Generic Updates update_pillar: salt.function: diff --git a/salt/orch/force-removal.sls b/salt/orch/force-removal.sls index 088d5a9e8..31b74d1ad 100644 --- a/salt/orch/force-removal.sls +++ b/salt/orch/force-removal.sls @@ -1,4 +1,7 @@ -# must provide the node (id) to be removed in the 'target' pillar +{#- Make sure we start with an updated mine #} +{%- set 
_ = salt.caasp_orch.sync_all() %} + +{#- must provide the node (id) to be removed in the 'target' pillar #} {%- set target = salt['pillar.get']('target') %} {%- set super_master = salt.saltutil.runner('manage.up', tgt='G@roles:kube-master and not ' + target, expr_form='compound')|first %} diff --git a/salt/orch/kubernetes.sls b/salt/orch/kubernetes.sls index 1867fd08c..98edd42b5 100644 --- a/salt/orch/kubernetes.sls +++ b/salt/orch/kubernetes.sls @@ -1,8 +1,11 @@ +{#- Make sure we start with an updated mine #} +{%- set _ = salt.caasp_orch.sync_all() %} + {%- set default_batch = salt['pillar.get']('default_batch', 5) %} -{%- set etcd_members = salt.saltutil.runner('mine.get', tgt='G@roles:etcd', fun='network.interfaces', tgt_type='compound').keys() %} -{%- set masters = salt.saltutil.runner('mine.get', tgt='G@roles:kube-master', fun='network.interfaces', tgt_type='compound').keys() %} -{%- set minions = salt.saltutil.runner('mine.get', tgt='G@roles:kube-minion', fun='network.interfaces', tgt_type='compound').keys() %} +{%- set etcd_members = salt.caasp_nodes.get_with_expr('G@roles:etcd') %} +{%- set masters = salt.caasp_nodes.get_with_expr('G@roles:kube-master') %} +{%- set minions = salt.caasp_nodes.get_with_expr('G@roles:kube-minion') %} {%- set super_master = masters|first %} diff --git a/salt/orch/prepare-product-migration.sls b/salt/orch/prepare-product-migration.sls index 73e214c53..cf3e14688 100644 --- a/salt/orch/prepare-product-migration.sls +++ b/salt/orch/prepare-product-migration.sls @@ -1,3 +1,6 @@ +{#- Make sure we start with an updated mine #} +{%- set _ = salt.caasp_orch.sync_all() %} + {#- Get a list of nodes seem to be down or unresponsive #} {#- This sends a "are you still there?" message to all #} {#- the nodes and wait for a response, so it takes some time. 
#} diff --git a/salt/orch/removal.sls b/salt/orch/removal.sls index 7ff585292..1258f859c 100644 --- a/salt/orch/removal.sls +++ b/salt/orch/removal.sls @@ -1,3 +1,6 @@ +{#- Make sure we start with an updated mine #} +{%- set _ = salt.caasp_orch.sync_all() %} + {#- must provide the node (id) to be removed in the 'target' pillar #} {%- set target = salt['pillar.get']('target') %} @@ -22,9 +25,9 @@ {%- endif %} {%- endif %} -{%- set etcd_members = salt.saltutil.runner('mine.get', tgt='G@roles:etcd', fun='network.interfaces', tgt_type='compound').keys() %} -{%- set masters = salt.saltutil.runner('mine.get', tgt='G@roles:kube-master', fun='network.interfaces', tgt_type='compound').keys() %} -{%- set minions = salt.saltutil.runner('mine.get', tgt='G@roles:kube-minion', fun='network.interfaces', tgt_type='compound').keys() %} +{%- set etcd_members = salt.caasp_nodes.get_with_expr('G@roles:etcd') %} +{%- set masters = salt.caasp_nodes.get_with_expr('G@roles:kube-master') %} +{%- set minions = salt.caasp_nodes.get_with_expr('G@roles:kube-minion') %} {%- set super_master_tgt = salt.caasp_nodes.get_super_master(masters=masters, excluded=[target] + nodes_down) %} diff --git a/salt/orch/update-etc-hosts.sls b/salt/orch/update-etc-hosts.sls index 52bed642a..1880567ba 100644 --- a/salt/orch/update-etc-hosts.sls +++ b/salt/orch/update-etc-hosts.sls @@ -1,3 +1,6 @@ +{#- Make sure we start with an updated mine #} +{%- set _ = salt.caasp_orch.sync_all() %} + {%- set updates_all_target = 'P@roles:(admin|etcd|kube-(master|minion)) and ' + 'G@bootstrap_complete:true and ' + 'not G@bootstrap_in_progress:true and ' + @@ -5,7 +8,8 @@ 'not G@removal_in_progress:true and ' + 'not G@force_removal_in_progress:true' %} -{%- if salt.saltutil.runner('mine.get', tgt=updates_all_target, fun='nodename', tgt_type='compound')|length > 0 %} +{%- if salt.caasp_nodes.get_with_expr(updates_all_target)|length > 0 %} + update_pillar: salt.function: - tgt: {{ updates_all_target }} @@ -37,4 +41,5 @@ 
etc_hosts_setup: - etc-hosts - require: - salt: update_mine + {% endif %} diff --git a/salt/orch/update.sls b/salt/orch/update.sls index 6046db937..c8bd7fdc9 100644 --- a/salt/orch/update.sls +++ b/salt/orch/update.sls @@ -1,3 +1,6 @@ +{#- Make sure we start with an updated mine #} +{%- set _ = salt.caasp_orch.sync_all() %} + {#- Get a list of nodes seem to be down or unresponsive #} {#- This sends a "are you still there?" message to all #} {#- the nodes and wait for a response, so it takes some time. #} @@ -29,7 +32,8 @@ {%- set is_updateable_master_tgt = is_updateable_tgt + ' and ' + is_master_tgt %} {%- set is_updateable_worker_tgt = is_updateable_tgt + ' and ' + is_worker_tgt %} {%- set is_updateable_node_tgt = '( ' + is_updateable_master_tgt + ' ) or ( ' + is_updateable_worker_tgt + ' )' %} -{%- set all_masters = salt.saltutil.runner('mine.get', tgt=is_master_tgt, fun='network.interfaces', tgt_type='compound').keys() %} + +{%- set all_masters = salt.caasp_nodes.get_with_expr(is_master_tgt) %} {%- set super_master = all_masters|first %} {%- set is_migration = salt['pillar.get']('migration', false) %} @@ -194,8 +198,8 @@ early-services-setup: - etcd-setup # Get list of masters needing reboot -{%- set masters = salt.saltutil.runner('mine.get', tgt=is_updateable_master_tgt, fun='network.interfaces', tgt_type='compound') %} -{%- for master_id in masters.keys() %} +{%- set masters = salt.caasp_nodes.get_with_expr(is_updateable_master_tgt) %} +{%- for master_id in masters %} # Kubelet needs other services, e.g. the cri, up + running. This provide a way # to ensure kubelet is stopped before any other services. @@ -305,13 +309,13 @@ all-masters-post-start-services: - kubelet.update-post-start-services - require: - early-services-setup -{%- for master_id in masters.keys() %} +{%- for master_id in masters %} - {{ master_id }}-start-services {%- endfor %} # We remove the grain when we have the last reference to using that grain. 
# Otherwise an incomplete subset of minions might be targeted. -{%- for master_id in masters.keys() %} +{%- for master_id in masters %} {{ master_id }}-reboot-needed-grain: salt.function: - tgt: '{{ master_id }}' @@ -345,13 +349,13 @@ all-workers-2.0-pre-clean-shutdown: - migrations.2-3.haproxy - require: - all-masters-post-start-services -{%- for master_id in masters.keys() %} +{%- for master_id in masters %} - {{ master_id }}-reboot-needed-grain {%- endfor %} # END NOTE: Remove me for 4.0 -{%- set workers = salt.saltutil.runner('mine.get', tgt=is_updateable_worker_tgt, fun='network.interfaces', tgt_type='compound') %} -{%- for worker_id, ip in workers.items() %} +{%- set workers = salt.caasp_nodes.get_with_expr(is_updateable_worker_tgt) %} +{%- for worker_id in workers %} # Call the node clean shutdown script # Kubelet needs other services, e.g. the cri, up + running. This provide a way @@ -365,7 +369,7 @@ all-workers-2.0-pre-clean-shutdown: - require: - all-workers-2.0-pre-clean-shutdown # wait until all the masters have been updated -{%- for master_id in masters.keys() %} +{%- for master_id in masters %} - {{ master_id }}-reboot-needed-grain {%- endfor %} @@ -504,13 +508,13 @@ kubelet-setup: - require: - all-masters-post-start-services # wait until all the machines in the cluster have been upgraded -{%- for master_id in masters.keys() %} +{%- for master_id in masters %} # We use the last state within the masters loop, which is different # on masters and minions. - {{ master_id }}-reboot-needed-grain {%- endfor %} {%- if not is_migration %} -{%- for worker_id in workers.keys() %} +{%- for worker_id in workers %} - {{ worker_id }}-remove-progress-grain {%- endfor %} {% endif %}