Commit

replacing everything with updated version

EarthmanT committed May 30, 2017
1 parent 7604de5 commit 11ba787
Showing 40 changed files with 873 additions and 3,125 deletions.
170 changes: 129 additions & 41 deletions README.md

Large diffs are not rendered by default.

225 changes: 57 additions & 168 deletions aws-blueprint.yaml
@@ -2,71 +2,45 @@ tosca_definitions_version: cloudify_dsl_1_3

description: >
This blueprint creates a Kubernetes Cluster.
It includes a master and two or more nodes with auto-scaling and auto-healing of the nodes.
It is based on the Kubernetes Portable Multi-Node Cluster guide in the Kubernetes documentation website.
https://kubernetes.io/docs/getting-started-guides/docker-multinode/
It is based on this documentation: https://kubernetes.io/docs/getting-started-guides/kubeadm/
imports:
- http://www.getcloudify.org/spec/cloudify/4.0/types.yaml
- http://getcloudify.org.s3.amazonaws.com/spec/aws-plugin/1.4.4/plugin.yaml
- http://www.getcloudify.org/spec/fabric-plugin/1.3.1/plugin.yaml
- http://www.getcloudify.org/spec/cloudify/4.0.1/types.yaml
- http://getcloudify.org.s3.amazonaws.com/spec/aws-plugin/1.4.9/plugin.yaml
- http://www.getcloudify.org/spec/diamond-plugin/1.3.5/plugin.yaml
- types/scale.yaml
- imports/kubernetes-blueprint.yaml
- types/cloud_config/cloud-config.yaml
- types/kubernetes.yaml
- imports/kubernetes.yaml
- imports/cloud-config.yaml

inputs:

key_name:
default: kubernetes-blueprint-key

private_key_path:
default: ~/.ssh/kubernetes-blueprint-key.pem

vpc_id:
type: string

vpc_cidr_block:
type: string

public_subnet_id:
type: string

public_subnet_cidr:
type: string

private_subnet_id:
type: string

private_subnet_cidr:
type: string

ec2_region_name:
default: us-east-1

ec2_region_endpoint:
default: ec2.us-east-1.amazonaws.com

availability_zone:
default: us-east-1e

ami:
description: >
Amazon Ubuntu 14.04 AMI
An AWS AMI. Tested with a Centos 7.0 image.
default: ami-ae7bfdb8

instance_type:
description: >
Agent VM Instance Type
The AWS instance_type. Tested with m3.medium, although that is unnecessarily large.
default: t2.small

agent_user:
default: ubuntu
description: >
The username of the agent running on the instance created from the image.
default: centos

encode_cloud_config:
default: false
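
The inputs that remain in this hunk all have defaults, so a deployment only needs to override whatever differs from them. A minimal sketch of overriding inputs at install time with the Cloudify CLI; passing these particular values is only illustrative, since they match the defaults above:

  cfy install aws-blueprint.yaml \
    -i ami=ami-ae7bfdb8 \
    -i instance_type=t2.small \
    -i agent_user=centos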

dsl_definitions:

aws_config: &aws_config
aws_access_key_id: { get_secret: aws_access_key_id }
aws_secret_access_key: { get_secret: aws_secret_access_key }
ec2_region_name: { get_input: ec2_region_name }
ec2_region_endpoint: { get_input: ec2_region_endpoint }
ec2_region_name: { get_secret: ec2_region_name }
ec2_region_endpoint: { get_secret: ec2_region_endpoint }
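
Because aws_config now resolves the credentials and region settings with get_secret instead of get_input, those secrets must exist on the Cloudify Manager before a deployment is created. A hedged sketch using the 4.x CLI secrets command; the values are placeholders:

  cfy secrets create aws_access_key_id -s <aws-access-key-id>
  cfy secrets create aws_secret_access_key -s <aws-secret-access-key>
  cfy secrets create ec2_region_name -s us-east-1
  cfy secrets create ec2_region_endpoint -s ec2.us-east-1.amazonaws.com

The node templates below reference further secrets (agent_key_private, availability_zone, public_subnet_id, private_subnet_id, vpc_id) that would be created the same way.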

node_templates:

@@ -75,22 +49,21 @@ node_templates:
properties:
agent_config:
install_method: remote
port: 22
user: { get_input: agent_user }
key: { get_property: [ key, private_key_path ] }
min_workers: 2
port: 22
key: { get_secret: agent_key_private }
aws_config: *aws_config
image_id: { get_input: ami }
instance_type: { get_input: instance_type }
parameters:
user_data: |
#!/bin/bash
sudo groupadd docker
sudo gpasswd -a ubuntu docker
placement: { get_property: [ public_subnet, availability_zone ] }
interfaces:
cloudify.interfaces.lifecycle:
create:
implementation: aws.cloudify_aws.ec2.instance.create
inputs:
args:
placement: { get_secret: availability_zone }
user_data: { get_attribute: [ cloudify_host_cloud_config, cloud_config ] }
relationships:
- type: cloudify.aws.relationships.instance_connected_to_keypair
target: key
- type: cloudify.aws.relationships.instance_connected_to_subnet
target: public_subnet
- type: cloudify.aws.relationships.instance_connected_to_security_group
@@ -105,29 +78,27 @@ node_templates:
properties:
agent_config:
install_method: remote
port: 22
user: { get_input: agent_user }
key: { get_property: [ key, private_key_path ] }
min_workers: 2
port: 22
key: { get_secret: agent_key_private }
aws_config: *aws_config
image_id: { get_input: ami }
instance_type: { get_input: instance_type }
parameters:
user_data: |
#!/bin/bash
sudo groupadd docker
sudo gpasswd -a ubuntu docker
placement: { get_property: [ private_subnet, availability_zone ] }
relationships:
- type: cloudify.aws.relationships.instance_connected_to_keypair
target: key
- type: cloudify.aws.relationships.instance_connected_to_subnet
target: private_subnet
- type: cloudify.aws.relationships.instance_connected_to_security_group
target: ssh_group
- type: cloudify.aws.relationships.instance_connected_to_security_group
target: kubernetes_security_group
interfaces:
cloudify.interfaces.lifecycle:
create:
implementation: aws.cloudify_aws.ec2.instance.create
inputs:
args:
placement: { get_secret: availability_zone }
user_data: { get_attribute: [ cloudify_host_cloud_config, cloud_config ] }
cloudify.interfaces.monitoring_agent:
install:
implementation: diamond.diamond_agent.tasks.install
@@ -210,24 +181,30 @@ node_templates:
type: cloudify.aws.nodes.SecurityGroup
properties:
aws_config: *aws_config
description: Puppet Group
description: SSH Group
rules:
- ip_protocol: tcp
from_port: 22
to_port: 22
cidr_ip: { get_input: vpc_cidr_block }
cidr_ip: 0.0.0.0/0
relationships:
- type: cloudify.aws.relationships.security_group_contained_in_vpc
target: vpc

kubernetes_master_ip:
type: cloudify.aws.nodes.ElasticIP
properties:
aws_config: *aws_config
domain: vpc

public_subnet:
type: cloudify.aws.nodes.Subnet
properties:
aws_config: *aws_config
use_external_resource: true
resource_id: { get_input: public_subnet_id }
cidr_block: { get_input: public_subnet_cidr }
availability_zone: { get_input: availability_zone }
resource_id: { get_secret: public_subnet_id }
cidr_block: N/A
availability_zone: N/A
relationships:
- type: cloudify.aws.relationships.subnet_contained_in_vpc
target: vpc
@@ -237,9 +214,9 @@ node_templates:
properties:
aws_config: *aws_config
use_external_resource: true
resource_id: { get_input: private_subnet_id }
cidr_block: { get_input: private_subnet_cidr }
availability_zone: { get_input: availability_zone }
resource_id: { get_secret: private_subnet_id }
cidr_block: N/A
availability_zone: N/A
relationships:
- type: cloudify.aws.relationships.subnet_contained_in_vpc
target: vpc
@@ -249,106 +226,18 @@ node_templates:
properties:
aws_config: *aws_config
use_external_resource: true
resource_id: { get_input: vpc_id }
cidr_block: { get_input: vpc_cidr_block }

key:
type: cloudify.aws.nodes.KeyPair
properties:
aws_config: *aws_config
resource_id: { get_input: key_name }
private_key_path: { get_input: private_key_path }

kubernetes_master_ip:
type: cloudify.aws.nodes.ElasticIP
properties:
aws_config: *aws_config
domain: vpc
resource_id: { get_secret: vpc_id }
cidr_block: N/A
relationships:
- type: cloudify.relationships.depends_on
target: cloudify_host_cloud_config

groups:

k8s_node_scale_group:
members:
- kubernetes_node_host


scale_up_group:
members: [kubernetes_node_host]
# This defines a scale group whose members may be scaled up, incrementing by 1.
# The scale workflow is called when the following criteria are met:
# The Hyperkube process total CPU will be more than 3 for a total of 10 seconds.
# No more than 6 hosts will be allowed.
policies:
auto_scale_up:
type: scale_policy_type
properties:
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true

scale_down_group:
members: [kubernetes_node_host]
# This defines a scale group whose members may be scaled down.
# The scale workflow is called when the following criteria are met:
# The Hyperkube process total CPU will be more than 3 for a total of 10 seconds.
# No more than 6 hosts will be allowed.
policies:
auto_scale_down:
type: scale_policy_type
properties:
policy_operates_on_group: true
scale_limit: 6
scale_direction: '<'
scale_threshold: 30
#service_selector: .*kubernetes_node_host.*.cpu.total.user
service_selector: .*kubernetes_node_host.*cpu.total.user
cooldown_time: 60
triggers:
execute_scale_workflow:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: scale
workflow_parameters:
delta: 1
scalable_entity_name: kubernetes_node
scale_compute: true
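
The workflow these triggers execute appears to be the built-in scale workflow, so the same operation can be run by hand when validating the group and policy parameters. A sketch, assuming a deployment named kubernetes (the deployment ID is not part of this commit):

  cfy executions start scale -d kubernetes \
    -p scalable_entity_name=kubernetes_node \
    -p delta=1 \
    -p scale_compute=true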

heal_group:
# This defines a group of hosts (the members list) that may be healed.
# The heal workflow is called when the following policy criteria are met:
# either the hyperkube process on the host or the total host CPU falls silent.
# The host and all software that it is supposed to have running on it will be healed.
members: [kubernetes_node_host]
policies:
simple_autoheal_policy:
type: cloudify.policies.types.host_failure
properties:
service:
- .*kubernetes_node_host.*.cpu.total.system
- .*kubernetes_node_host.*.process.hyperkube.cpu.percent
interval_between_workflows: 60
triggers:
auto_heal_trigger:
type: cloudify.policies.triggers.execute_workflow
parameters:
workflow: heal
workflow_parameters:
node_instance_id: { 'get_property': [ SELF, node_id ] }
diagnose_value: { 'get_property': [ SELF, diagnose ] }
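
Likewise, the heal workflow named in the trigger can be started manually against a specific node instance when testing the policy. A sketch under the same assumed deployment name; the node-instance ID here is hypothetical:

  cfy executions start heal -d kubernetes \
    -p node_instance_id=kubernetes_node_host_abc123 \
    -p diagnose_value='manual test'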

policies:

kubernetes_node_vms_scaling_policy: