# .github/workflows/ci-test-ginkgo.yml
name: ci-test-ginkgo

on:
  push:
    branches: [main]
    paths:
      - "KubeArmor/**"
      - "tests/**"
      - "protobuf/**"
      - ".github/workflows/ci-test-ginkgo.yml"
      - "pkg/KubeArmorOperator/**"
      - "deployments/helm/**"
  pull_request:
    branches: [main]
    paths:
      - "KubeArmor/**"
      - "tests/**"
      - "protobuf/**"
      - ".github/workflows/ci-test-ginkgo.yml"
      - "examples/multiubuntu/build/**"
      - "pkg/KubeArmorOperator/**"
      - "deployments/helm/**"

# Declare default permissions as read only.
permissions: read-all

jobs:
  build:
    name: Auto-testing Framework / ${{ matrix.os }} / ${{ matrix.runtime }}
    runs-on: ${{ matrix.os }}
    env:
      RUNTIME: ${{ matrix.runtime }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-20.04]
        runtime: ["containerd", "crio"]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: true

      - uses: actions/setup-go@v5
        with:
          go-version-file: 'KubeArmor/go.mod'
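
      # Detect whether pkg/KubeArmorController sources changed; the controller image
      # is rebuilt and loaded only when that filter matches.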
      - name: Check what paths were updated
        uses: dorny/paths-filter@v2
        id: filter
        with:
          filters: |
            controller:
              - 'pkg/KubeArmorController/**'

      - name: Install the latest LLVM toolchain
        run: ./.github/workflows/install-llvm.sh

      - name: Compile libbpf
        run: ./.github/workflows/install-libbpf.sh

      - name: Setup a Kubernetes environment
        run: ./.github/workflows/install-k3s.sh

      - name: Generate KubeArmor artifacts
        run: |
          # Set IS_COVERAGE=true to build the kubearmor-test image for coverage calculation.
          export IS_COVERAGE=true
          GITHUB_SHA=$GITHUB_SHA ./KubeArmor/build/build_kubearmor.sh

      - name: Build KubeArmor-Operator
        working-directory: pkg/KubeArmorOperator
        run: |
          make docker-build

      - name: Build KubeArmorController
        if: steps.filter.outputs.controller == 'true'
        run: make -C pkg/KubeArmorController/ docker-build TAG=latest
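
      # The pre-run pod is applied before KubeArmor is installed so the suite can also
      # exercise workloads that were already running when KubeArmor started
      # (assumed intent of this step).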
      - name: Deploy pre-existing pod
        run: |
          kubectl apply -f ./tests/k8s_env/ksp/pre-run-pod.yaml
          sleep 60
          kubectl get pods -A
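
      # Load the locally built images into the runtime under test: k3s keeps its own
      # containerd image store (hence 'k3s ctr images import'), while CRI-O reads from
      # the shared containers-storage that podman writes to.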
      - name: Run KubeArmor
        run: |
          if [[ ${{ matrix.runtime }} == "containerd" ]]; then
            docker save kubearmor/kubearmor-test-init:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-test:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-operator:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-snitch:latest | sudo k3s ctr images import -
            if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
              docker save kubearmor/kubearmor-controller:latest | sudo k3s ctr images import -
            fi
          else
            if [ ${{ matrix.runtime }} == "crio" ]; then
              docker save kubearmor/kubearmor-test-init:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-test-init:latest
              docker save kubearmor/kubearmor-test:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-test:latest
              docker save kubearmor/kubearmor-operator:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-operator:latest
              docker save kubearmor/kubearmor-snitch:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-snitch:latest
              if [ ${{ steps.filter.outputs.controller }} == 'true' ]; then
                docker save kubearmor/kubearmor-controller:latest | sudo podman load
                sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-controller:latest
              fi
            fi
          fi
          docker system prune -a -f
          docker buildx prune -a -f
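          # Install the KubeArmor operator via Helm and apply the sample KubeArmorConfig;
          # the operator then rolls out the KubeArmor daemonset. If the controller image
          # was rebuilt locally, force imagePullPolicy=Never so the image loaded above is
          # used instead of a registry pull.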
          helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest
          kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
          kubectl get pods -A
          if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-coverage.yaml --dry-run=client -o json | \
              jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never"' | \
              kubectl apply -f -
          else
            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-coverage.yaml
          fi
          kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
          kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch,kubearmor-app!=kubearmor-controller -n kubearmor
          kubectl wait --timeout=1m --for=condition=ready pod -l kubearmor-app=kubearmor-controller -n kubearmor
          kubectl get pods -A
          sleep 10
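          # Mount /coverage from the host into the KubeArmor container and pass
          # -test.coverprofile so the coverage-instrumented kubearmor-test binary
          # writes its profile where later steps can pick it up.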
          DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
          echo "DaemonSet: $DAEMONSET_NAME"
          kubectl patch daemonset $DAEMONSET_NAME -n kubearmor --type='json' -p='[
            {
              "op": "add",
              "path": "/spec/template/spec/volumes/-",
              "value": {
                "name": "coverage-storage",
                "hostPath": {
                  "path": "/coverage",
                  "type": "DirectoryOrCreate"
                }
              }
            },
            {
              "op": "add",
              "path": "/spec/template/spec/containers/0/volumeMounts/-",
              "value": {
                "mountPath": "/coverage",
                "name": "coverage-storage"
              }
            },
            {
              "op": "add",
              "path": "/spec/template/spec/containers/0/args/-",
              "value": "-test.coverprofile=/coverage/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out"
            }
          ]'
          sleep 15
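
      # Resolve the daemonset's pod name and export it as POD_NAME so later steps can
      # exec into the KubeArmor container.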
      - name: Get KubeArmor POD info
        run: |
          DAEMONSET_NAME=$(kubectl get daemonset -n kubearmor -o jsonpath='{.items[0].metadata.name}')
          LABEL_SELECTOR=$(kubectl get daemonset $DAEMONSET_NAME -n kubearmor -o jsonpath='{.spec.selector.matchLabels}' | jq -r 'to_entries[] | "\(.key)=\(.value)"' | paste -sd, -)
          POD_NAME=$(kubectl get pods -n kubearmor -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}')
          echo "Pod: $POD_NAME"
          echo "POD_NAME=$POD_NAME" >> $GITHUB_ENV

      - name: Test KubeArmor using Ginkgo
        run: |
          go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
          make
        working-directory: ./tests/k8s_env
        timeout-minutes: 30
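
      # Send SIGINT so the coverage-instrumented binary shuts down gracefully and
      # flushes its coverage profile before it is collected below.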
      - name: Kill KubeArmor process in the pod
        run: |
          KUBEARMOR_PID=$(kubectl exec ${{ env.POD_NAME }} -n kubearmor -c kubearmor -- sh -c "ps aux | grep '[K]ubeArmor/kubearmor-test' | awk '{print \$1}'")
          kubectl exec ${{ env.POD_NAME }} -n kubearmor -c kubearmor -- sh -c "kill -s SIGINT $KUBEARMOR_PID"
          sleep 10
        env:
          POD_NAME: ${{ env.POD_NAME }}
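
      # Poll the hostPath-mounted /coverage directory for up to ~2 minutes (24 x 5s)
      # and copy the profile into KubeArmor/ for the coverage report and artifact upload.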
      - name: Extract coverage file
        run: |
          for i in {1..24}; do
            if [ -f /coverage/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out ]; then
              cp /coverage/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out
              break
            fi
            sleep 5
          done
          ls -l
        working-directory: KubeArmor

      - name: Get karmor sysdump
        if: ${{ failure() }}
        run: |
          kubectl describe pod -n kubearmor -l kubearmor-app=kubearmor
          curl -sfL http://get.kubearmor.io/ | sudo sh -s -- -b /usr/local/bin
          mkdir -p /tmp/kubearmor/ && cd /tmp/kubearmor && karmor sysdump

      - name: Archive log artifacts
        if: ${{ failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: kubearmor.logs
          path: |
            /tmp/kubearmor/
            /tmp/kubearmor.*

      - name: Measure code coverage
        if: ${{ always() }}
        run: |
          ls -l
          go tool cover -func coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out
        working-directory: KubeArmor
        env:
          GOPATH: /home/runner/go

      - name: Upload coverage file
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: coverage-k8s-${{ matrix.os }}-${{ matrix.runtime }}
          path: KubeArmor/coverage_k8s_${{ matrix.os }}_${{ matrix.runtime }}.out