Skip to content

Commit

Permalink
fixing dsl issue
Browse files Browse the repository at this point in the history
  • Loading branch information
EarthmanT committed May 9, 2017
1 parent e06553a commit 147b186
Show file tree
Hide file tree
Showing 3 changed files with 118 additions and 111 deletions.
147 changes: 75 additions & 72 deletions aws-blueprint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -271,82 +271,85 @@ groups:
members:
- kubernetes_node_host

scale_up_group:
members: [kubernetes_node_host]

scale_up_group:
members: [kubernetes_node_host]
# This defines a scale group whose members may be scaled up, incrementing by 1.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be more than 3 for a total of 10 seconds.
# No more than 6 hosts will be allowed.
policies:
auto_scale_up:
type: scale_policy_type
properties:
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true

scale_down_group:
members: [kubernetes_node_host]
# This defines a scale group whose members may be scaled up, incrementing by 1.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be more than 3 for a total of 10 seconds.
# No more than 6 hosts will be allowed.
policies:
auto_scale_up:
type: scale_policy_type
properties:
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true

scale_down_group:
# This defines a scale group whose members may be scaled down. Only one host will be removed per run.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be less than 1 for a total of 200 seconds.
# No less than 2 hosts will be allowed.
members: [kubernetes_node_host]
policies:
auto_scale_down:
type: scale_policy_type
properties:
scale_limit: 2
scale_direction: '>'
scale_threshold: 25
#service_selector: .*kubernetes_node_host.*.process.hyperkube.cpu.percent
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
moving_window_size: 30
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: -1
scalable_entity_name: kubernetes_node
scale_compute: true

heal_group:
# This defines a group of hosts in members that may be healed.
# The heal workflow is called when the following policy criteria are met.
# Either the hyperkube process on the host, or the total host CPU, needs to fall silent.
# The host and all software that it is supposed to have running on it will be healed.
members: [kubernetes_node_host]
policies:
simple_autoheal_policy:
type: cloudify.policies.types.host_failure
properties:
service:
- .*kubernetes_node_host.*.cpu.total.system
- .*kubernetes_node_host.*.process.hyperkube.cpu.percent
interval_between_workflows: 60
triggers:
auto_heal_trigger:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: heal
workflow_parameters:
node_instance_id: { 'get_property': [ SELF, node_id ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }
policies:
auto_scale_down:
type: scale_policy_type
properties:
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true

heal_group:
# This defines a group of hosts in members that may be healed.
# The heal workflow is called when the following policy criteria are met.
# Either the hyperkube process on the host, or the total host CPU, needs to fall silent.
# The host and all software that it is supposed to have running on it will be healed.
members: [kubernetes_node_host]
policies:
simple_autoheal_policy:
type: cloudify.policies.types.host_failure
properties:
service:
- .*kubernetes_node_host.*.cpu.total.system
- .*kubernetes_node_host.*.process.hyperkube.cpu.percent
interval_between_workflows: 60
triggers:
auto_heal_trigger:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: heal
workflow_parameters:
node_instance_id: { 'get_property': [ SELF, node_id ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }

policies:

kubernetes_node_vms_scaling_policy:
type: cloudify.policies.scaling
Expand Down
42 changes: 22 additions & 20 deletions azure-blueprint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -593,7 +593,7 @@ node_templates:

groups:

azure_k8s_node_scale_group:
k8s_node_scale_group:
members:
- kubernetes_node_host_nic_ip_cfg
- kubernetes_node_host_nic
Expand Down Expand Up @@ -623,34 +623,36 @@ groups:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: azure_k8s_node_scale_group
scalable_entity_name: kubernetes_node
scale_compute: true

scale_down_group:
# This defines a scale group whose members may be scaled down. Only one host will be removed per run.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be less than 1 for a total of 200 seconds.
# No less than 2 hosts will be allowed.
members: [kubernetes_node_host]
# This defines a scale group whose members may be scaled up, incrementing by 1.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be more than 3 for a total of 10 seconds.
# No more than 6 hosts will be allowed.
policies:
auto_scale_down:
type: scale_policy_type
properties:
scale_limit: 2
scale_direction: '>'
scale_threshold: 25
#service_selector: .*kubernetes_node_host.*.process.hyperkube.cpu.percent
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
moving_window_size: 30
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: -1
scalable_entity_name: azure_k8s_node_scale_group

type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true

heal_group:
# This defines a group of hosts in members that may be healed.
# The heal workflow is called when the following policy criteria are met.
Expand All @@ -672,15 +674,15 @@ groups:
workflow: heal
workflow_parameters:
node_instance_id: { 'get_property': [ SELF, node_id ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }

policies:

kubernetes_node_vms_scaling_policy:
type: cloudify.policies.scaling
properties:
default_instances: 1
targets: [azure_k8s_node_scale_group]
targets: [k8s_node_scale_group]

outputs:
kubernetes_info:
Expand Down
40 changes: 21 additions & 19 deletions openstack-blueprint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -288,8 +288,8 @@ groups:

k8s_node_scale_group:
members:
- kubernetes_node_port
- kubernetes_node_host
- kubernetes_node_port

scale_up_group:
members: [kubernetes_node_host]
Expand All @@ -315,34 +315,36 @@ groups:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: k8s_node_scale_group
scalable_entity_name: kubernetes_node
scale_compute: true

scale_down_group:
# This defines a scale group whose members may be scaled down. Only one host will be removed per run.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be less than 1 for a total of 200 seconds.
# No less than 2 hosts will be allowed.
members: [kubernetes_node_host]
# This defines a scale group whose members may be scaled up, incrementing by 1.
# The scale workflow is called when the following criteria are met
# The Hyperkube process total CPU will be more than 3 for a total of 10 seconds.
# No more than 6 hosts will be allowed.
policies:
auto_scale_down:
type: scale_policy_type
properties:
scale_limit: 2
scale_direction: '>'
scale_threshold: 25
#service_selector: .*kubernetes_node_host.*.process.hyperkube.cpu.percent
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
moving_window_size: 30
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: -1
scalable_entity_name: k8s_node_scale_group

type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true

heal_group:
# This defines a group of hosts in members that may be healed.
# The heal workflow is called when a the following policy criteria are met.
Expand All @@ -364,7 +366,7 @@ groups:
workflow: heal
workflow_parameters:
node_instance_id: { 'get_property': [ SELF, node_id ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }

policies:

Expand Down

0 comments on commit 147b186

Please sign in to comment.