From 2e38c894c2a0ad4ed5eb71b1331a9fbc364dd303 Mon Sep 17 00:00:00 2001
From: Gareth Jones
Date: Mon, 10 Apr 2023 12:52:55 +1200
Subject: [PATCH 1/4] fix: adjust the grammar in some messages

---
 lib/ecs_deploy/auto_scaler.rb                 |  4 ++--
 .../auto_scaler/auto_scaling_group_config.rb  |  8 ++++----
 .../auto_scaler/cluster_resource_manager.rb   |  8 ++++----
 .../auto_scaler/instance_drainer.rb           |  2 +-
 lib/ecs_deploy/auto_scaler/service_config.rb  | 20 +++++++++----------
 .../auto_scaler/spot_fleet_request_config.rb  |  2 +-
 lib/ecs_deploy/capistrano.rb                  |  2 +-
 .../instance_fluctuation_manager.rb           |  4 ++--
 lib/ecs_deploy/scheduled_task.rb              |  4 ++--
 lib/ecs_deploy/service.rb                     |  8 ++++----
 lib/ecs_deploy/task_definition.rb             |  4 ++--
 11 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/lib/ecs_deploy/auto_scaler.rb b/lib/ecs_deploy/auto_scaler.rb
index e59de20..3495257 100644
--- a/lib/ecs_deploy/auto_scaler.rb
+++ b/lib/ecs_deploy/auto_scaler.rb
@@ -50,7 +50,7 @@ def main_loop(cluster_scaling_config)
       loop_with_polling_interval("loop of #{cluster_scaling_config.name}") do
         ths = cluster_scaling_config.service_configs.map do |service_config|
           Thread.new(service_config) do |s|
-            @logger.debug "Start service scaling of #{s.name}"
+            @logger.debug "Scaling service #{s.name}"
             s.adjust_desired_count(cluster_scaling_config.cluster_resource_manager)
           end
         end
@@ -58,7 +58,7 @@ def main_loop(cluster_scaling_config)
 
         ths.each(&:join)
 
-        @logger.debug "Start cluster scaling of #{cluster_scaling_config.name}"
+        @logger.debug "Scaling cluster #{cluster_scaling_config.name}"
         required_capacity = cluster_scaling_config.service_configs.sum { |s| s.desired_count * s.required_capacity }
         cluster_scaling_config.update_desired_capacity(required_capacity)
 
diff --git a/lib/ecs_deploy/auto_scaler/auto_scaling_group_config.rb b/lib/ecs_deploy/auto_scaler/auto_scaling_group_config.rb
index 6a86dc5..15590ed 100644
--- a/lib/ecs_deploy/auto_scaler/auto_scaling_group_config.rb
+++ b/lib/ecs_deploy/auto_scaler/auto_scaling_group_config.rb
@@ -35,7 +35,7 @@ def update_desired_capacity(required_capacity)
         if decreased_capacity > 0
           new_desired_capacity = current_asg.desired_capacity - decreased_capacity
           cluster_resource_manager.trigger_capacity_update(current_asg.desired_capacity, new_desired_capacity)
-          @logger.info "#{log_prefix} Update desired_capacity to #{new_desired_capacity}"
+          @logger.info "#{log_prefix} Updated desired_capacity to #{new_desired_capacity}"
         else
           @logger.info "#{log_prefix} Tried to Update desired_capacity but there were no deregisterable instances"
         end
@@ -47,7 +47,7 @@ def update_desired_capacity(required_capacity)
           desired_capacity: desired_capacity,
         )
         cluster_resource_manager.trigger_capacity_update(current_asg.desired_capacity, desired_capacity)
-        @logger.info "#{log_prefix} Update desired_capacity to #{desired_capacity}"
+        @logger.info "#{log_prefix} Updated desired_capacity to #{desired_capacity}"
       end
     rescue => e
       AutoScaler.error_logger.error(e)
@@ -74,7 +74,7 @@ def detach_instances(instance_ids:, should_decrement_desired_capacity:)
         )
       end
 
-      @logger.info "#{log_prefix} Detach instances from ASG: #{instance_ids.inspect}"
+      @logger.info "#{log_prefix} Detached instances from ASG: #{instance_ids.inspect}"
     end
 
     private
@@ -89,7 +89,7 @@ def decrease_desired_capacity(count)
           auto_scaling_group_instances.any? {|instance| instance.instance_id == i.ec2_instance_id }
       end
 
-      @logger.info "#{log_prefix} Fetch deregisterable instances: #{deregisterable_instances.map(&:ec2_instance_id).inspect}"
+      @logger.info "#{log_prefix} Fetched deregisterable instances: #{deregisterable_instances.map(&:ec2_instance_id).inspect}"
 
       az_to_instance_count = auto_scaling_group_instances.each_with_object(Hash.new(0)) { |i, h| h[i.availability_zone] += 1 }
       az_to_deregisterable_instances = deregisterable_instances.group_by do |i|
diff --git a/lib/ecs_deploy/auto_scaler/cluster_resource_manager.rb b/lib/ecs_deploy/auto_scaler/cluster_resource_manager.rb
index f7efa4e..efe7855 100644
--- a/lib/ecs_deploy/auto_scaler/cluster_resource_manager.rb
+++ b/lib/ecs_deploy/auto_scaler/cluster_resource_manager.rb
@@ -27,7 +27,7 @@ def initialize(region:, cluster:, service_configs:, logger: nil, capacity_based_
 
     def acquire(capacity, timeout: nil)
       @mutex.synchronize do
-        @logger&.debug("#{log_prefix} Try to acquire #{capacity} capacity (capacity: #{@capacity}, used_capacity: #{@used_capacity})")
+        @logger&.debug("#{log_prefix} Trying to acquire #{capacity} capacity (capacity: #{@capacity}, used_capacity: #{@used_capacity})")
         Timeout.timeout(timeout) do
           while @capacity - @used_capacity < capacity
             @resource.wait(@mutex)
@@ -77,7 +77,7 @@ def trigger_capacity_update(old_desired_capacity, new_desired_capacity, interval
       return if new_desired_capacity == old_desired_capacity
 
       th = Thread.new do
-        @logger&.info "#{log_prefix} Start updating capacity: #{old_desired_capacity} -> #{new_desired_capacity}"
+        @logger&.info "#{log_prefix} Updating capacity: #{old_desired_capacity} -> #{new_desired_capacity}"
         Timeout.timeout(180) do
           until @capacity == new_desired_capacity ||
                 (new_desired_capacity > old_desired_capacity && @capacity > new_desired_capacity) ||
@@ -91,7 +91,7 @@ def trigger_capacity_update(old_desired_capacity, new_desired_capacity, interval
             sleep interval
           end
 
-          @logger&.info "#{log_prefix} capacity is updated to #{@capacity}"
+          @logger&.info "#{log_prefix} updated capacity to #{@capacity}"
         end
       rescue Timeout::Error => e
         msg = "#{log_prefix} `#{__method__}': #{e} (#{e.class})"
@@ -108,7 +108,7 @@ def trigger_capacity_update(old_desired_capacity, new_desired_capacity, interval
       end
 
       if wait_until_capacity_updated
-        @logger&.info "#{log_prefix} Wait for the capacity of active instances to become #{new_desired_capacity} from #{old_desired_capacity}"
+        @logger&.info "#{log_prefix} Waiting for the number of active instances to reach #{new_desired_capacity} (from #{old_desired_capacity})"
         th.join
       end
     end
diff --git a/lib/ecs_deploy/auto_scaler/instance_drainer.rb b/lib/ecs_deploy/auto_scaler/instance_drainer.rb
index 563deb5..f5ac3df 100644
--- a/lib/ecs_deploy/auto_scaler/instance_drainer.rb
+++ b/lib/ecs_deploy/auto_scaler/instance_drainer.rb
@@ -79,7 +79,7 @@ def set_instance_state_to_draining(config_to_instance_ids, region)
       cl = ecs_client(region)
       config_to_instance_ids.each do |config, instance_ids|
         if config.disable_draining == true || config.disable_draining == "true"
-          @logger.info "Skip draining instances: region: #{region}, cluster: #{config.cluster}, instance_ids: #{instance_ids.inspect}"
+          @logger.info "Skipped draining instances: region: #{region}, cluster: #{config.cluster}, instance_ids: #{instance_ids.inspect}"
           next
         end
 
diff --git a/lib/ecs_deploy/auto_scaler/service_config.rb b/lib/ecs_deploy/auto_scaler/service_config.rb
index 46dd0e6..c907e5a 100644
--- a/lib/ecs_deploy/auto_scaler/service_config.rb
+++ b/lib/ecs_deploy/auto_scaler/service_config.rb
@@ -40,7 +40,7 @@ def adjust_desired_count(cluster_resource_manager)
           next if difference >= trigger.step
 
           if trigger.match?
-            @logger.info "#{log_prefix} Fire upscale trigger by #{trigger.alarm_name} #{trigger.state}"
+            @logger.info "#{log_prefix} Firing upscale trigger by #{trigger.alarm_name} #{trigger.state}"
             difference = trigger.step
           end
         end
@@ -50,7 +50,7 @@ def adjust_desired_count(cluster_resource_manager)
           next if difference > 0 && !trigger.prioritized_over_upscale_triggers?
           next unless trigger.match?
 
-          @logger.info "#{log_prefix} Fire downscale trigger by #{trigger.alarm_name} #{trigger.state}"
+          @logger.info "#{log_prefix} Firing downscale trigger by #{trigger.alarm_name} #{trigger.state}"
           difference = [difference, -trigger.step].min
         end
       end
@@ -124,28 +124,28 @@ def update_service(difference, cluster_resource_manager)
         if current_level < next_level && overheat? # next max
           level = next_level
           @reach_max_at = nil
-          @logger.info "#{log_prefix} Service is overheat, uses next max count"
+          @logger.info "#{log_prefix} Service is overheated, uses next max count"
         elsif current_level < next_level && !overheat? # wait cooldown
           level = current_level
           now = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
           @reach_max_at ||= now
-          @logger.info "#{log_prefix} Service waits cooldown elapsed #{(now - @reach_max_at).to_i}sec"
+          @logger.info "#{log_prefix} Service waiting for cooldown period to elapse #{(now - @reach_max_at).to_i}sec"
         elsif current_level == next_level && next_desired_count >= max_task_count[current_level] # reach current max
           level = current_level
           now = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
           @reach_max_at ||= now
-          @logger.info "#{log_prefix} Service waits cooldown elapsed #{(now - @reach_max_at).to_i}sec"
+          @logger.info "#{log_prefix} Service waiting for cooldown period to elapse #{(now - @reach_max_at).to_i}sec"
           if next_desired_count > max_task_count[current_level] && current_level == max_task_count.size - 1
             @logger.warn "#{log_prefix} Desired count has reached the maximum value and couldn't be increased"
           end
         elsif current_level == next_level && next_desired_count < max_task_count[current_level]
           level = current_level
           @reach_max_at = nil
-          @logger.info "#{log_prefix} Service clears cooldown state"
+          @logger.info "#{log_prefix} Service has finished cooling down"
         elsif current_level > next_level
           level = next_level
           @reach_max_at = nil
-          @logger.info "#{log_prefix} Service clears cooldown state"
+          @logger.info "#{log_prefix} Service has finished cooling down"
         end
 
         next_desired_count = [next_desired_count, max_task_count[level]].min
@@ -156,7 +156,7 @@ def update_service(difference, cluster_resource_manager)
         end
 
         @last_updated_at = Process.clock_gettime(Process::CLOCK_MONOTONIC, :second)
-        @logger.info "#{log_prefix} Update desired_count to #{next_desired_count}"
+        @logger.info "#{log_prefix} Updated desired_count to #{next_desired_count}"
       rescue => e
         AutoScaler.error_logger.error(e)
       end
@@ -197,7 +197,7 @@ def decrease_desired_count(by, cluster_resource_manager)
 
       cl.wait_until(:services_stable, cluster: cluster, services: [name]) do |w|
         w.before_wait do
-          @logger.debug "#{log_prefix} wait service stable"
+          @logger.debug "#{log_prefix} waiting for service to stabilize"
         end
       end
 
@@ -205,7 +205,7 @@ def decrease_desired_count(by, cluster_resource_manager)
       stopping_task_arns.each_slice(MAX_DESCRIBABLE_TASK_COUNT) do |arns|
         cl.wait_until(:tasks_stopped, cluster: cluster, tasks: arns) do |w|
           w.before_wait do
-            @logger.debug "#{log_prefix} wait stopping tasks stopped"
+            @logger.debug "#{log_prefix} waiting for tasks to finish stopping"
           end
         end
       end
diff --git a/lib/ecs_deploy/auto_scaler/spot_fleet_request_config.rb b/lib/ecs_deploy/auto_scaler/spot_fleet_request_config.rb
index f92ff18..80dd049 100644
--- a/lib/ecs_deploy/auto_scaler/spot_fleet_request_config.rb
+++ b/lib/ecs_deploy/auto_scaler/spot_fleet_request_config.rb
@@ -43,7 +43,7 @@ def update_desired_capacity(required_capacity)
         # Wait until the capacity is updated to prevent the process from terminating before container draining is completed
         wait_until_capacity_updated: desired_capacity < request_config.target_capacity,
       )
-      @logger.info "#{log_prefix} Update desired_capacity to #{desired_capacity}"
+      @logger.info "#{log_prefix} Updated desired_capacity to #{desired_capacity}"
     rescue => e
       AutoScaler.error_logger.error(e)
     end
diff --git a/lib/ecs_deploy/capistrano.rb b/lib/ecs_deploy/capistrano.rb
index cb4a443..8c7d190 100644
--- a/lib/ecs_deploy/capistrano.rb
+++ b/lib/ecs_deploy/capistrano.rb
@@ -166,7 +166,7 @@
 
           EcsDeploy.logger.info "#{current_task_definition_arn} -> #{rollback_arn}"
 
-          raise "Past task_definition_arns is nothing" unless rollback_arn
+          raise "Past task_definition_arns is empty" unless rollback_arn
 
           service_options = {
             region: r,
diff --git a/lib/ecs_deploy/instance_fluctuation_manager.rb b/lib/ecs_deploy/instance_fluctuation_manager.rb
index 6509856..88adeeb 100644
--- a/lib/ecs_deploy/instance_fluctuation_manager.rb
+++ b/lib/ecs_deploy/instance_fluctuation_manager.rb
@@ -21,7 +21,7 @@ def initialize(region:, cluster:, auto_scaling_group_name:, desired_capacity:, l
     def increase
       asg = fetch_auto_scaling_group
 
-      @logger.info("Increase desired capacity of #{@auto_scaling_group_name}: #{asg.desired_capacity} => #{asg.max_size}")
+      @logger.info("Increasing desired capacity of #{@auto_scaling_group_name}: #{asg.desired_capacity} => #{asg.max_size}")
       as_client.update_auto_scaling_group(auto_scaling_group_name: @auto_scaling_group_name, desired_capacity: asg.max_size)
 
       # Run in background because increasing instances may take time
@@ -47,7 +47,7 @@ def decrease
         @logger.info("The capacity is already #{asg.desired_capacity}")
         return
       end
-      @logger.info("Decrease desired capacity of #{@auto_scaling_group_name}: #{asg.desired_capacity} => #{@desired_capacity}")
+      @logger.info("Decreasing desired capacity of #{@auto_scaling_group_name}: #{asg.desired_capacity} => #{@desired_capacity}")
 
       container_instances = ecs_client.list_container_instances(cluster: @cluster).flat_map do |resp|
         ecs_client.describe_container_instances(
diff --git a/lib/ecs_deploy/scheduled_task.rb b/lib/ecs_deploy/scheduled_task.rb
index 14b61c9..bc40a32 100644
--- a/lib/ecs_deploy/scheduled_task.rb
+++ b/lib/ecs_deploy/scheduled_task.rb
@@ -62,7 +62,7 @@ def put_rule
         state: @enabled ? "ENABLED" : "DISABLED",
         description: @description,
       )
-      EcsDeploy.logger.info "create cloudwatch event rule [#{res.rule_arn}] [#{@region}] [#{Paint['OK', :green]}]"
+      EcsDeploy.logger.info "created cloudwatch event rule [#{res.rule_arn}] [#{@region}] [#{Paint['OK', :green]}]"
     end
 
     def put_targets
@@ -90,7 +90,7 @@ def put_targets
         targets: [target]
       )
       if res.failed_entry_count.zero?
-        EcsDeploy.logger.info "create cloudwatch event target [#{@target_id}] [#{@region}] [#{Paint['OK', :green]}]"
+        EcsDeploy.logger.info "created cloudwatch event target [#{@target_id}] [#{@region}] [#{Paint['OK', :green]}]"
       else
         res.failed_entries.each do |entry|
           EcsDeploy.logger.error "failed to create cloudwatch event target [#{@region}] target_id=#{entry.target_id} error_code=#{entry.error_code} error_message=#{entry.error_message}"
diff --git a/lib/ecs_deploy/service.rb b/lib/ecs_deploy/service.rb
index ab6d422..9fdb912 100644
--- a/lib/ecs_deploy/service.rb
+++ b/lib/ecs_deploy/service.rb
@@ -106,7 +106,7 @@ def deploy
           service_options.delete(:placement_strategy)
         end
         @response = @client.create_service(service_options)
-        EcsDeploy.logger.info "create service [#{@service_name}] [#{@cluster}] [#{@region}] [#{Paint['OK', :green]}]"
+        EcsDeploy.logger.info "created service [#{@service_name}] [#{@cluster}] [#{@region}] [#{Paint['OK', :green]}]"
       else
         return delete_service if @delete
 
@@ -122,7 +122,7 @@ def deploy
           service_options.delete(:placement_strategy)
         end
         @response = @client.update_service(service_options)
-        EcsDeploy.logger.info "update service [#{@service_name}] [#{@cluster}] [#{@region}] [#{Paint['OK', :green]}]"
+        EcsDeploy.logger.info "updated service [#{@service_name}] [#{@cluster}] [#{@region}] [#{Paint['OK', :green]}]"
       end
     end
 
@@ -149,7 +149,7 @@ def delete_service
         sleep 1
       end
       @client.delete_service(cluster: @cluster, service: @service_name)
-      EcsDeploy.logger.info "delete service [#{@service_name}] [#{@cluster}] [#{@region}] [#{Paint['OK', :green]}]"
+      EcsDeploy.logger.info "deleted service [#{@service_name}] [#{@cluster}] [#{@region}] [#{Paint['OK', :green]}]"
     end
 
     def update_tags(service_name, tags)
@@ -191,7 +191,7 @@ def self.wait_all_running(services)
       ss.reject(&:delete).map(&:service_name).each_slice(MAX_DESCRIBE_SERVICES).map do |chunked_service_names|
         Thread.new do
           EcsDeploy.config.ecs_wait_until_services_stable_max_attempts.times do
-            EcsDeploy.logger.info "wait service stable [#{chunked_service_names.join(", ")}] [#{cl}]"
+            EcsDeploy.logger.info "waiting for services to stabilize [#{chunked_service_names.join(", ")}] [#{cl}]"
             resp = client.describe_services(cluster: cl, services: chunked_service_names)
             resp.services.each do |s|
               # cf. https://github.com/aws/aws-sdk-ruby/blob/master/gems/aws-sdk-ecs/lib/aws-sdk-ecs/waiters.rb#L91-L96
diff --git a/lib/ecs_deploy/task_definition.rb b/lib/ecs_deploy/task_definition.rb
index 4c05f0a..602beb9 100644
--- a/lib/ecs_deploy/task_definition.rb
+++ b/lib/ecs_deploy/task_definition.rb
@@ -7,7 +7,7 @@ def self.deregister(arn, region: nil)
       client.deregister_task_definition({
         task_definition: arn,
       })
-      EcsDeploy.logger.info "deregister task definition [#{arn}] [#{client.config.region}] [#{Paint['OK', :green]}]"
+      EcsDeploy.logger.info "deregistered task definition [#{arn}] [#{client.config.region}] [#{Paint['OK', :green]}]"
     end
 
     def initialize(
@@ -71,7 +71,7 @@ def register
         tags: @tags,
         runtime_platform: @runtime_platform
       })
-      EcsDeploy.logger.info "register task definition [#{@task_definition_name}] [#{@region}] [#{Paint['OK', :green]}]"
+      EcsDeploy.logger.info "registered task definition [#{@task_definition_name}] [#{@region}] [#{Paint['OK', :green]}]"
       res.task_definition
     end
   end

From 4584595fdae99933b81903f82a04d039326968ae Mon Sep 17 00:00:00 2001
From: Gareth Jones
Date: Mon, 10 Apr 2023 13:01:05 +1200
Subject: [PATCH 2/4] docs: improve grammar and structure of readme

---
 README.md | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index a2d2e01..01ddc33 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,26 @@
 # EcsDeploy
 
-Helper script for deployment to Amazon ECS.
+Helper script for deployment to Amazon ECS, designed to be compatible with `capistrano`.
 
 This gem is experimental.
 
-Main purpose is combination with capistrano API.
-
 ## Installation
 
 Add this line to your application's Gemfile:
 
 ```ruby
-gem 'ecs_deploy', github: "reproio/ecs_deploy"
+gem "ecs_deploy", github: "reproio/ecs_deploy"
 ```
 
 And then execute:
 
     $ bundle
 
-## Usage
-
-Use by Capistrano.
+## Configuration
 
 ```ruby
 # Capfile
-require 'ecs_deploy/capistrano'
+require "ecs_deploy/capistrano"
 
 # deploy.rb
 set :ecs_default_cluster, "ecs-cluster-name"
@@ -93,11 +89,11 @@ set :ecs_tasks, [
 
 set :ecs_scheduled_tasks, [
   {
-    cluster: "default", # Unless this key, use fetch(:ecs_default_cluster)
+    cluster: "default", # Defaults to fetch(:ecs_default_cluster)
     rule_name: "schedule_name",
     schedule_expression: "cron(0 12 * * ? *)",
     description: "schedule_description", # Optional
-    target_id: "task_name", # Unless this key, use task_definition_name
+    target_id: "task_name", # Defaults to the task_definition_name
     task_definition_name: "myapp-#{fetch(:rails_env)}",
     task_count: 2, # Default 1
     revision: 12, # Optional
@@ -130,12 +126,14 @@ set :ecs_services, [
 ]
 ```
 
+## Usage
+
 ```sh
-cap ecs:register_task_definition # register ecs_tasks as TaskDefinition
-cap ecs:deploy_scheduled_task # register ecs_scheduled_tasks to CloudWatchEvent
-cap ecs:deploy # create or update Service by ecs_services info
+bundle exec cap ecs:register_task_definition # register ecs_tasks as TaskDefinition
+bundle exec cap ecs:deploy_scheduled_task # register ecs_scheduled_tasks to CloudWatchEvent
+bundle exec cap ecs:deploy # create or update Service by ecs_services info
 
-cap ecs:rollback # deregister current task definition and update Service by previous revision of current task definition
+bundle exec cap ecs:rollback # deregister current task definition and update Service by previous revision of current task definition
 ```
 
 ### Rollback example
@@ -194,7 +192,7 @@ The autoscaler of `ecs_deploy` supports auto scaling of ECS services and cluster
 
 ### Prerequisits
 
-* You use a ECS cluster whose instances belong to either an auto scaling group or a spot fleet request
+* An ECS cluster whose instances belong to either an autoscaling group or a spot fleet request
 * You have CloudWatch alarms and you want to scale services when their state changes
 
 ### How to use autoscaler
@@ -283,7 +281,7 @@ Then, execute the following command:
 ecs_auto_scaler
 ```
 
-I recommends deploy `ecs_auto_scaler` on ECS too.
+It is recommended to run the `ecs_auto_scaler` via a container on ECS.
 
 ### Signals
 
@@ -460,7 +458,7 @@ The following permissions are required for the preceding configuration of "repro
 
 ### How to deploy faster with Auto Scaling Group
 
-Add following configuration to your deploy.rb and hooks if you need.
+Add the following configuration and hooks to your `config/deploy.rb`:
 
 ```ruby
 # deploy.rb

From 01cae90ef0f9d6705d312cbfa966a75f92527dd3 Mon Sep 17 00:00:00 2001
From: Gareth Jones
Date: Fri, 4 Aug 2023 14:44:59 +1200
Subject: [PATCH 3/4] Update README.md

Co-authored-by: Koki Koyama
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 01ddc33..dcb84b9 100644
--- a/README.md
+++ b/README.md
@@ -192,7 +192,7 @@ The autoscaler of `ecs_deploy` supports auto scaling of ECS services and cluster
 
 ### Prerequisits
 
-* An ECS cluster whose instances belong to either an autoscaling group or a spot fleet request
+* An ECS cluster whose instances belong to either an Auto Scaling group or a Spot Fleet request
 * You have CloudWatch alarms and you want to scale services when their state changes
 
 ### How to use autoscaler

From 9555d7fff84fa731ffc1ac6c7f8c6558b2b72fa6 Mon Sep 17 00:00:00 2001
From: Gareth Jones
Date: Fri, 4 Aug 2023 02:56:33 +0000
Subject: [PATCH 4/4] test: update specs

---
 spec/ecs_deploy/instance_fluctuation_manager_spec.rb | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/spec/ecs_deploy/instance_fluctuation_manager_spec.rb b/spec/ecs_deploy/instance_fluctuation_manager_spec.rb
index 0c421bf..fcf721a 100644
--- a/spec/ecs_deploy/instance_fluctuation_manager_spec.rb
+++ b/spec/ecs_deploy/instance_fluctuation_manager_spec.rb
@@ -60,7 +60,7 @@
       thread = instance_fluctuation_manager.increase
       thread.join
       log = logdev.string
-      expect(log).to include("Increase desired capacity of asg-cluster: 50 => 100")
+      expect(log).to include("Increasing desired capacity of asg-cluster: 50 => 100")
       [60, 70, 80, 90].each do |count|
         expect(log).to include("Current registered instance count: #{count}")
       end
@@ -151,7 +151,7 @@
     it "succeeded in decreasing instances" do
       instance_fluctuation_manager.decrease
       log = logdev.string
-      expect(log).to include("Decrease desired capacity of asg-cluster: 100 => 50")
+      expect(log).to include("Decreasing desired capacity of asg-cluster: 100 => 50")
       expect(log).to include("Succeeded in decreasing instances!")
       instance_size_per_az = log.lines.grep(/AZ balance/).last.scan(/AZ balance: \{"zone-a"=>(\d+), "zone-b"=>(\d+)\}/).flatten.map(&:to_i)
       expect(instance_size_per_az).to contain_exactly(25, 25)
@@ -251,7 +251,7 @@
     it "succeeded in decreasing instances" do
       instance_fluctuation_manager.decrease
       log = logdev.string
-      expect(log).to include("Decrease desired capacity of asg-cluster: 100 => 60")
+      expect(log).to include("Decreasing desired capacity of asg-cluster: 100 => 60")
       expect(log).to include("Succeeded in decreasing instances!")
       instance_size_per_az = log.lines.grep(/AZ balance/).last.scan(/AZ balance: \{"zone-a"=>(\d+), "zone-b"=>(\d+), "zone-c"=>(\d+)\}/).flatten.map(&:to_i)
       expect(instance_size_per_az).to contain_exactly(20, 20, 20)
@@ -272,7 +272,7 @@
     it "succeeded in decreasing instances" do
      instance_fluctuation_manager.decrease
      log = logdev.string
-      expect(log).to include("Decrease desired capacity of asg-cluster: 100 => 53")
+      expect(log).to include("Decreasing desired capacity of asg-cluster: 100 => 53")
      expect(log).to include("Succeeded in decreasing instances!")
      instance_size_per_az = log.lines.grep(/AZ balance/).last.scan(/AZ balance: \{"zone-a"=>(\d+), "zone-b"=>(\d+), "zone-c"=>(\d+)\}/).flatten.map(&:to_i)
      expect(instance_size_per_az).to contain_exactly(17, 18, 18)
@@ -394,7 +394,7 @@
 
       instance_fluctuation_manager.decrease
       log = logdev.string
-      expect(log).to include("Decrease desired capacity of asg-cluster: 1 => 0")
+      expect(log).to include("Decreasing desired capacity of asg-cluster: 1 => 0")
       expect(log).to include("Succeeded in decreasing instances!")
     end
   end