forked from ekristen/aws-nuke
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathecs-task.go
145 lines (114 loc) · 3.12 KB
/
ecs-task.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
package resources
import (
"context"
"github.com/sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/ecs/ecsiface"
"github.com/ekristen/libnuke/pkg/registry"
"github.com/ekristen/libnuke/pkg/resource"
"github.com/ekristen/libnuke/pkg/types"
"github.com/ekristen/aws-nuke/v3/pkg/nuke"
)
// ECSTaskResource is the registry name under which ECS tasks are listed and nuked.
const ECSTaskResource = "ECSTask"
func init() {
registry.Register(®istry.Registration{
Name: ECSTaskResource,
Scope: nuke.Account,
Lister: &ECSTaskLister{},
})
}
// ECSTaskLister lists all running ECS tasks across every cluster in the account.
type ECSTaskLister struct {
	// mockSvc is a test seam: when non-nil, List uses it instead of building
	// a real ECS client from the session.
	mockSvc ecsiface.ECSAPI
}
// List discovers every RUNNING ECS task in the account. It first enumerates
// all clusters (following pagination), then lists the running tasks of each
// cluster (also following pagination) and attaches the tasks' tags where
// available. Tag-lookup failures are logged and tolerated rather than
// aborting the whole listing.
func (l *ECSTaskLister) List(_ context.Context, o interface{}) ([]resource.Resource, error) {
	opts := o.(*nuke.ListerOpts)

	resources := make([]resource.Resource, 0)

	var svc ecsiface.ECSAPI
	if l.mockSvc != nil {
		svc = l.mockSvc
	} else {
		svc = ecs.New(opts.Session)
	}

	// Discover all clusters, following pagination.
	var clusters []*string
	clusterParams := &ecs.ListClustersInput{
		MaxResults: aws.Int64(100),
	}
	for {
		output, err := svc.ListClusters(clusterParams)
		if err != nil {
			return nil, err
		}

		clusters = append(clusters, output.ClusterArns...)

		if output.NextToken == nil {
			break
		}
		clusterParams.NextToken = output.NextToken
	}

	// Discover all running tasks from all clusters.
	for _, clusterArn := range clusters {
		taskParams := &ecs.ListTasksInput{
			Cluster:       clusterArn,
			// 100 is the ListTasks API maximum; fewer round trips per cluster.
			MaxResults:    aws.Int64(100),
			DesiredStatus: aws.String("RUNNING"),
		}

		// BUG FIX: the original set taskParams.NextToken after a `continue`
		// to the next cluster, so the assignment was dead and only the first
		// page of tasks per cluster was ever listed. Paginate properly here.
		for {
			output, err := svc.ListTasks(taskParams)
			if err != nil {
				return nil, err
			}

			for _, taskArn := range output.TaskArns {
				ecsTask := &ECSTask{
					svc:        svc,
					taskARN:    taskArn,
					clusterARN: clusterArn,
				}

				tags, err := svc.ListTagsForResource(&ecs.ListTagsForResourceInput{
					ResourceArn: taskArn,
				})
				if err != nil {
					// Best effort: a task without readable tags is still nuked.
					logrus.WithError(err).Error("unable to get tags for resource")
				}
				if tags != nil {
					ecsTask.tags = tags.Tags
				}

				resources = append(resources, ecsTask)
			}

			if output.NextToken == nil {
				break
			}
			taskParams.NextToken = output.NextToken
		}
	}

	return resources, nil
}
// ECSTask is a single running ECS task targeted for removal.
type ECSTask struct {
	svc        ecsiface.ECSAPI // client used to issue StopTask
	taskARN    *string         // ARN of the task itself
	clusterARN *string         // ARN of the cluster the task runs in
	tags       []*ecs.Tag      // tags fetched at list time; may be nil if lookup failed
}
// Filter never excludes a task: every listed ECS task is eligible for removal.
func (t *ECSTask) Filter() error {
	return nil
}
// Properties exposes the task ARN, its cluster ARN, and any tags captured at
// list time, for use by filters and reporting.
func (t *ECSTask) Properties() types.Properties {
	props := types.NewProperties()
	props.Set("TaskARN", t.taskARN)
	props.Set("ClusterARN", t.clusterARN)
	for _, tg := range t.tags {
		props.SetTag(tg.Key, tg.Value)
	}
	return props
}
// Remove stops the task via StopTask. ECS then performs the equivalent of
// `docker stop` on the task's containers: a SIGTERM, and — if a container has
// not exited within the default 30-second grace period — a SIGKILL. Containers
// that exit gracefully within the window never receive SIGKILL.
func (t *ECSTask) Remove(_ context.Context) error {
	input := &ecs.StopTaskInput{
		Cluster: t.clusterARN,
		Task:    t.taskARN,
		Reason:  aws.String("Task stopped via AWS Nuke"),
	}
	_, err := t.svc.StopTask(input)
	return err
}