Skip to content
This repository has been archived by the owner on Oct 29, 2019. It is now read-only.

Commit

Permalink
Merge pull request #746 from kubic-project/update_mine_before_orch
Browse files Browse the repository at this point in the history
Synchronize everything before starting an orchestration.
  • Loading branch information
MalloZup authored Feb 27, 2019
2 parents b4fc964 + bb22844 commit 6af85a7
Show file tree
Hide file tree
Showing 8 changed files with 59 additions and 19 deletions.
16 changes: 16 additions & 0 deletions salt/_modules/caasp_orch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from __future__ import absolute_import


def __virtual__():
return "caasp_orch"


def sync_all():
    '''
    Synchronize everything before starting a new orchestration.

    Two steps, in order:

    1. ``saltutil.sync_all`` with ``refresh=True`` — pushes custom
       modules/states/utils to the minions and refreshes pillar, so the
       orchestration runs against up-to-date code and data.
    2. The ``mine.update`` runner targeted at ``'*'`` with ``clear=True`` —
       rebuilds the mine from scratch on every minion, dropping any stale
       entries (e.g. nodes that were removed since the last run).

    NOTE: ``__utils__`` and ``__salt__`` are dunder dicts injected by the
    Salt loader; this function is only callable from within a Salt run
    (it is invoked from the orch SLS files via ``salt.caasp_orch.sync_all()``).

    :return: None
    '''
    __utils__['caasp_log.debug']('orch: refreshing all')
    __salt__['saltutil.sync_all'](refresh=True)

    __utils__['caasp_log.debug']('orch: synchronizing the mine')
    __salt__['saltutil.runner']('mine.update', tgt='*', clear=True)
3 changes: 3 additions & 0 deletions salt/orch/etcd-migrate.sls
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

# Generic Updates
update_pillar:
salt.function:
Expand Down
5 changes: 4 additions & 1 deletion salt/orch/force-removal.sls
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
# must provide the node (id) to be removed in the 'target' pillar
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

{#- must provide the node (id) to be removed in the 'target' pillar #}
{%- set target = salt['pillar.get']('target') %}

{%- set super_master = salt.saltutil.runner('manage.up', tgt='G@roles:kube-master and not ' + target, expr_form='compound')|first %}
Expand Down
9 changes: 6 additions & 3 deletions salt/orch/kubernetes.sls
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

{%- set default_batch = salt['pillar.get']('default_batch', 5) %}

{%- set etcd_members = salt.saltutil.runner('mine.get', tgt='G@roles:etcd', fun='network.interfaces', tgt_type='compound').keys() %}
{%- set masters = salt.saltutil.runner('mine.get', tgt='G@roles:kube-master', fun='network.interfaces', tgt_type='compound').keys() %}
{%- set minions = salt.saltutil.runner('mine.get', tgt='G@roles:kube-minion', fun='network.interfaces', tgt_type='compound').keys() %}
{%- set etcd_members = salt.caasp_nodes.get_with_expr('G@roles:etcd') %}
{%- set masters = salt.caasp_nodes.get_with_expr('G@roles:kube-master') %}
{%- set minions = salt.caasp_nodes.get_with_expr('G@roles:kube-minion') %}

{%- set super_master = masters|first %}

Expand Down
3 changes: 3 additions & 0 deletions salt/orch/prepare-product-migration.sls
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

{#- Get a list of nodes that seem to be down or unresponsive #}
{#- This sends a "are you still there?" message to all #}
{#- the nodes and wait for a response, so it takes some time. #}
Expand Down
9 changes: 6 additions & 3 deletions salt/orch/removal.sls
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

{#- must provide the node (id) to be removed in the 'target' pillar #}
{%- set target = salt['pillar.get']('target') %}

Expand All @@ -22,9 +25,9 @@
{%- endif %}
{%- endif %}

{%- set etcd_members = salt.saltutil.runner('mine.get', tgt='G@roles:etcd', fun='network.interfaces', tgt_type='compound').keys() %}
{%- set masters = salt.saltutil.runner('mine.get', tgt='G@roles:kube-master', fun='network.interfaces', tgt_type='compound').keys() %}
{%- set minions = salt.saltutil.runner('mine.get', tgt='G@roles:kube-minion', fun='network.interfaces', tgt_type='compound').keys() %}
{%- set etcd_members = salt.caasp_nodes.get_with_expr('G@roles:etcd') %}
{%- set masters = salt.caasp_nodes.get_with_expr('G@roles:kube-master') %}
{%- set minions = salt.caasp_nodes.get_with_expr('G@roles:kube-minion') %}

{%- set super_master_tgt = salt.caasp_nodes.get_super_master(masters=masters,
excluded=[target] + nodes_down) %}
Expand Down
7 changes: 6 additions & 1 deletion salt/orch/update-etc-hosts.sls
Original file line number Diff line number Diff line change
@@ -1,11 +1,15 @@
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

{%- set updates_all_target = 'P@roles:(admin|etcd|kube-(master|minion)) and ' +
'G@bootstrap_complete:true and ' +
'not G@bootstrap_in_progress:true and ' +
'not G@update_in_progress:true and ' +
'not G@removal_in_progress:true and ' +
'not G@force_removal_in_progress:true' %}

{%- if salt.saltutil.runner('mine.get', tgt=updates_all_target, fun='nodename', tgt_type='compound')|length > 0 %}
{%- if salt.caasp_nodes.get_with_expr(updates_all_target)|length > 0 %}

update_pillar:
salt.function:
- tgt: {{ updates_all_target }}
Expand Down Expand Up @@ -37,4 +41,5 @@ etc_hosts_setup:
- etc-hosts
- require:
- salt: update_mine

{% endif %}
26 changes: 15 additions & 11 deletions salt/orch/update.sls
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
{#- Make sure we start with an updated mine #}
{%- set _ = salt.caasp_orch.sync_all() %}

{#- Get a list of nodes that seem to be down or unresponsive #}
{#- This sends a "are you still there?" message to all #}
{#- the nodes and wait for a response, so it takes some time. #}
Expand Down Expand Up @@ -29,7 +32,8 @@
{%- set is_updateable_master_tgt = is_updateable_tgt + ' and ' + is_master_tgt %}
{%- set is_updateable_worker_tgt = is_updateable_tgt + ' and ' + is_worker_tgt %}
{%- set is_updateable_node_tgt = '( ' + is_updateable_master_tgt + ' ) or ( ' + is_updateable_worker_tgt + ' )' %}
{%- set all_masters = salt.saltutil.runner('mine.get', tgt=is_master_tgt, fun='network.interfaces', tgt_type='compound').keys() %}

{%- set all_masters = salt.caasp_nodes.get_with_expr(is_master_tgt) %}
{%- set super_master = all_masters|first %}

{%- set is_migration = salt['pillar.get']('migration', false) %}
Expand Down Expand Up @@ -194,8 +198,8 @@ early-services-setup:
- etcd-setup

# Get list of masters needing reboot
{%- set masters = salt.saltutil.runner('mine.get', tgt=is_updateable_master_tgt, fun='network.interfaces', tgt_type='compound') %}
{%- for master_id in masters.keys() %}
{%- set masters = salt.caasp_nodes.get_with_expr(is_updateable_master_tgt) %}
{%- for master_id in masters %}

# Kubelet needs other services, e.g. the cri, up + running. This provide a way
# to ensure kubelet is stopped before any other services.
Expand Down Expand Up @@ -305,13 +309,13 @@ all-masters-post-start-services:
- kubelet.update-post-start-services
- require:
- early-services-setup
{%- for master_id in masters.keys() %}
{%- for master_id in masters %}
- {{ master_id }}-start-services
{%- endfor %}

# We remove the grain when we have the last reference to using that grain.
# Otherwise an incomplete subset of minions might be targeted.
{%- for master_id in masters.keys() %}
{%- for master_id in masters %}
{{ master_id }}-reboot-needed-grain:
salt.function:
- tgt: '{{ master_id }}'
Expand Down Expand Up @@ -345,13 +349,13 @@ all-workers-2.0-pre-clean-shutdown:
- migrations.2-3.haproxy
- require:
- all-masters-post-start-services
{%- for master_id in masters.keys() %}
{%- for master_id in masters %}
- {{ master_id }}-reboot-needed-grain
{%- endfor %}
# END NOTE: Remove me for 4.0

{%- set workers = salt.saltutil.runner('mine.get', tgt=is_updateable_worker_tgt, fun='network.interfaces', tgt_type='compound') %}
{%- for worker_id, ip in workers.items() %}
{%- set workers = salt.caasp_nodes.get_with_expr(is_updateable_worker_tgt) %}
{%- for worker_id in workers %}

# Call the node clean shutdown script
# Kubelet needs other services, e.g. the cri, up + running. This provide a way
Expand All @@ -365,7 +369,7 @@ all-workers-2.0-pre-clean-shutdown:
- require:
- all-workers-2.0-pre-clean-shutdown
# wait until all the masters have been updated
{%- for master_id in masters.keys() %}
{%- for master_id in masters %}
- {{ master_id }}-reboot-needed-grain
{%- endfor %}

Expand Down Expand Up @@ -504,13 +508,13 @@ kubelet-setup:
- require:
- all-masters-post-start-services
# wait until all the machines in the cluster have been upgraded
{%- for master_id in masters.keys() %}
{%- for master_id in masters %}
# We use the last state within the masters loop, which is different
# on masters and minions.
- {{ master_id }}-reboot-needed-grain
{%- endfor %}
{%- if not is_migration %}
{%- for worker_id in workers.keys() %}
{%- for worker_id in workers %}
- {{ worker_id }}-remove-progress-grain
{%- endfor %}
{% endif %}
Expand Down

0 comments on commit 6af85a7

Please sign in to comment.